Index: llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2286,13 +2286,15 @@
   // slot for dynamic stack allocations.
   // The scavenger might be invoked if the frame offset does not fit into
-  // the 16-bit immediate. We don't know the complete frame size here
-  // because we've not yet computed callee-saved register spills or the
-  // needed alignment padding.
+  // the 16-bit immediate, or into the 8-bit immediate when SPE is enabled.
+  // We don't know the complete frame size here because we've not yet computed
+  // callee-saved register spills or the needed alignment padding.
   unsigned StackSize = determineFrameLayout(MF, true);
   MachineFrameInfo &MFI = MF.getFrameInfo();
 
+  bool NeedSpills = Subtarget.hasSPE() ? !isInt<8>(StackSize) : !isInt<16>(StackSize);
+
   if (MFI.hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) ||
-      (hasSpills(MF) && !isInt<16>(StackSize))) {
+      (hasSpills(MF) && NeedSpills)) {
     const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
     const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
     const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC;
Index: llvm/test/CodeGen/PowerPC/register-pressure.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/register-pressure.ll
@@ -0,0 +1,5619 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu \
+; RUN:   -mattr=+spe | FileCheck %s -check-prefixes=CHECK,SPE
+
+target datalayout = "E-m:e-p:32:32-Fn32-i64:64-n32"
+target triple = "ppc32"
+
+%struct.cmplx = type { double, double }
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local i32 @main() #0 {
+; CHECK-LABEL: main:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mflr 0
+; CHECK-NEXT:    stwu 1, -48(1)
+; CHECK-NEXT:    stw 31, 44(1)
+; CHECK-NEXT:    stw 0, 52(1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset r31, -4
+; CHECK-NEXT:    .cfi_offset lr, 4
+; CHECK-NEXT:    mr 31, 1
+; CHECK-NEXT:    .cfi_def_cfa_register r31
+; CHECK-NEXT:    li 3, 10
+; CHECK-NEXT:    stw 3, 40(31)
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    stw 3, 28(31)
+; CHECK-NEXT:    lis 4, 16404
+; CHECK-NEXT:    stw 4, 24(31)
+; CHECK-NEXT:    stw 3, 36(31)
+; CHECK-NEXT:    lis 3, 16420
+; CHECK-NEXT:    stw 3, 32(31)
+; CHECK-NEXT:    lwz 3, 40(31)
+; CHECK-NEXT:    slwi 3, 3, 4
+; CHECK-NEXT:    bl malloc
+; CHECK-NEXT:    stw 3, 20(31)
+; CHECK-NEXT:    addi 7, 31, 24
+; CHECK-NEXT:    stw 7, 16(31)
+; CHECK-NEXT:    lwz 3, 20(31)
+; CHECK-NEXT:    stw 3, 12(31)
+; CHECK-NEXT:    lwz 5, 16(31)
+; CHECK-NEXT:    lwz 6, 12(31)
+; CHECK-NEXT:    li 3, 5
+; CHECK-NEXT:    li 4, 1
+; CHECK-NEXT:    li 8, 1
+; CHECK-NEXT:    bl pass11
+; CHECK-NEXT:    li 3, 0
+; CHECK-NEXT:    lwz 0, 52(1)
+; CHECK-NEXT:    lwz 31, 44(1)
+; CHECK-NEXT:    addi 1, 1, 48
+; CHECK-NEXT:    mtlr 0
+; CHECK-NEXT:    blr
+  %1 = alloca i32, align 4
+  %2 = alloca %struct.cmplx, align 8
+  %3 = alloca ptr, align 4
+  %4 = alloca ptr, align 4
+  %5 = alloca ptr, align 4
+  store i32 10, ptr %1, align 4
+  %6 = getelementptr inbounds %struct.cmplx, ptr %2, i32 0, i32 0
+  store double 5.000000e+00, ptr %6, align 8
+  %7 = getelementptr inbounds %struct.cmplx, ptr %2, i32 0, i32 1
+  store double 1.000000e+01, ptr %7, align 8
+  %8 = load i32, ptr %1, align 4
+  %9 = mul i32 %8, 16
+  %10 = call ptr @malloc(i32 noundef %9)
+  store ptr %10, ptr %3, align 4
+  store ptr %2, ptr %4, align 4
+  %11 = 
load ptr, ptr %3, align 4 + store ptr %11, ptr %5, align 4 + %12 = load ptr, ptr %4, align 4 + %13 = load ptr, ptr %5, align 4 + call void @pass11(i32 noundef 5, i32 noundef 1, ptr noundef %12, ptr noundef %13, ptr noundef %2, i32 noundef 1) + ret i32 0 +} + +declare dso_local ptr @malloc(i32 noundef) #1 + +; Function Attrs: noinline nounwind optnone uwtable +define internal void @pass11(i32 noundef %0, i32 noundef %1, ptr noalias noundef %2, ptr noalias noundef %3, ptr noalias noundef %4, i32 noundef %5) #0 { +; CHECK-LABEL: pass11: +; CHECK: # %bb.0: +; CHECK-NEXT: stwu 1, -1088(1) +; CHECK-NEXT: stw 31, 1084(1) +; CHECK-NEXT: .cfi_def_cfa_offset 1088 +; CHECK-NEXT: .cfi_offset r31, -4 +; CHECK-NEXT: mr 31, 1 +; CHECK-NEXT: .cfi_def_cfa_register r31 +; CHECK-NEXT: .cfi_offset r15, -68 +; CHECK-NEXT: .cfi_offset r16, -208 +; CHECK-NEXT: .cfi_offset r17, -200 +; CHECK-NEXT: .cfi_offset r18, -192 +; CHECK-NEXT: .cfi_offset r19, -184 +; CHECK-NEXT: .cfi_offset r20, -176 +; CHECK-NEXT: .cfi_offset r21, -168 +; CHECK-NEXT: .cfi_offset r22, -160 +; CHECK-NEXT: .cfi_offset r23, -152 +; CHECK-NEXT: .cfi_offset r24, -144 +; CHECK-NEXT: .cfi_offset r25, -136 +; CHECK-NEXT: .cfi_offset r26, -128 +; CHECK-NEXT: .cfi_offset r27, -120 +; CHECK-NEXT: .cfi_offset r28, -112 +; CHECK-NEXT: .cfi_offset r29, -104 +; CHECK-NEXT: .cfi_offset r30, -8 +; CHECK-NEXT: stw 15, 1020(31) # 4-byte Folded Spill +; CHECK-NEXT: li 9, 880 +; CHECK-NEXT: evstddx 16, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 888 +; CHECK-NEXT: evstddx 17, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 896 +; CHECK-NEXT: evstddx 18, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 904 +; CHECK-NEXT: evstddx 19, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 912 +; CHECK-NEXT: evstddx 20, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 920 +; CHECK-NEXT: evstddx 21, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 928 +; CHECK-NEXT: evstddx 22, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 936 +; CHECK-NEXT: evstddx 23, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 944 +; CHECK-NEXT: evstddx 24, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 952 +; CHECK-NEXT: evstddx 25, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 960 +; CHECK-NEXT: evstddx 26, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 968 +; CHECK-NEXT: evstddx 27, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 976 +; CHECK-NEXT: evstddx 28, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: li 9, 984 +; CHECK-NEXT: evstddx 29, 31, 9 # 8-byte Folded Spill +; CHECK-NEXT: stw 30, 1080(31) # 4-byte Folded Spill +; CHECK-NEXT: stw 3, 876(31) +; CHECK-NEXT: stw 4, 872(31) +; CHECK-NEXT: stw 5, 868(31) +; CHECK-NEXT: stw 6, 864(31) +; CHECK-NEXT: stw 7, 860(31) +; CHECK-NEXT: stw 8, 856(31) +; CHECK-NEXT: li 3, 11 +; CHECK-NEXT: stw 3, 852(31) +; CHECK-NEXT: lis 3, -30876 +; CHECK-NEXT: ori 3, 3, 61626 +; CHECK-NEXT: stw 3, 844(31) +; CHECK-NEXT: lis 3, 16362 +; CHECK-NEXT: ori 3, 3, 60300 +; CHECK-NEXT: stw 3, 840(31) +; CHECK-NEXT: lwz 3, 856(31) +; CHECK-NEXT: efdcfsi 3, 3 +; CHECK-NEXT: li 4, .LCPI1_0@l +; CHECK-NEXT: lis 5, .LCPI1_0@ha +; CHECK-NEXT: evlddx 4, 5, 4 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: li 4, 832 +; CHECK-NEXT: evstddx 3, 31, 4 +; CHECK-NEXT: lis 3, -9785 +; CHECK-NEXT: ori 3, 3, 4790 +; CHECK-NEXT: stw 3, 828(31) +; CHECK-NEXT: lis 3, 16346 +; CHECK-NEXT: ori 3, 3, 38440 +; CHECK-NEXT: stw 3, 824(31) +; CHECK-NEXT: lwz 3, 856(31) +; CHECK-NEXT: efdcfsi 3, 3 +; CHECK-NEXT: li 4, .LCPI1_1@l +; CHECK-NEXT: lis 5, .LCPI1_1@ha +; CHECK-NEXT: 
evlddx 4, 5, 4 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: li 4, 816 +; CHECK-NEXT: evstddx 3, 31, 4 +; CHECK-NEXT: lis 3, 25615 +; CHECK-NEXT: ori 3, 3, 17627 +; CHECK-NEXT: stw 3, 812(31) +; CHECK-NEXT: lis 3, -16446 +; CHECK-NEXT: ori 3, 3, 14175 +; CHECK-NEXT: stw 3, 808(31) +; CHECK-NEXT: lwz 3, 856(31) +; CHECK-NEXT: efdcfsi 3, 3 +; CHECK-NEXT: li 4, .LCPI1_2@l +; CHECK-NEXT: lis 5, .LCPI1_2@ha +; CHECK-NEXT: evlddx 4, 5, 4 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: li 4, 800 +; CHECK-NEXT: evstddx 3, 31, 4 +; CHECK-NEXT: lis 3, 32631 +; CHECK-NEXT: ori 3, 3, 22663 +; CHECK-NEXT: stw 3, 796(31) +; CHECK-NEXT: lis 3, -16412 +; CHECK-NEXT: ori 3, 3, 62622 +; CHECK-NEXT: stw 3, 792(31) +; CHECK-NEXT: lwz 3, 856(31) +; CHECK-NEXT: efdcfsi 3, 3 +; CHECK-NEXT: li 4, .LCPI1_3@l +; CHECK-NEXT: lis 5, .LCPI1_3@ha +; CHECK-NEXT: evlddx 4, 5, 4 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: li 4, 784 +; CHECK-NEXT: evstddx 3, 31, 4 +; CHECK-NEXT: lis 3, -25651 +; CHECK-NEXT: ori 3, 3, 20567 +; CHECK-NEXT: stw 3, 780(31) +; CHECK-NEXT: lis 3, -16402 +; CHECK-NEXT: ori 3, 3, 46122 +; CHECK-NEXT: stw 3, 776(31) +; CHECK-NEXT: lwz 3, 856(31) +; CHECK-NEXT: efdcfsi 3, 3 +; CHECK-NEXT: li 4, .LCPI1_4@l +; CHECK-NEXT: lis 5, .LCPI1_4@ha +; CHECK-NEXT: evlddx 4, 5, 4 +; CHECK-NEXT: efdmul 3, 3, 4 +; CHECK-NEXT: li 4, 768 +; CHECK-NEXT: evstddx 3, 31, 4 +; CHECK-NEXT: li 3, 0 +; CHECK-NEXT: stw 3, 764(31) +; CHECK-NEXT: b .LBB1_1 +; CHECK-NEXT: .LBB1_1: # =>This Loop Header: Depth=1 +; CHECK-NEXT: # Child Loop BB1_3 Depth 2 +; CHECK-NEXT: lwz 3, 764(31) +; CHECK-NEXT: lwz 4, 872(31) +; CHECK-NEXT: cmplw 3, 4 +; CHECK-NEXT: bge 0, .LBB1_8 +; CHECK-NEXT: b .LBB1_2 +; CHECK-NEXT: .LBB1_2: +; CHECK-NEXT: lwz 3, 868(31) +; CHECK-NEXT: lwz 4, 876(31) +; CHECK-NEXT: lwz 5, 764(31) +; CHECK-NEXT: mullw 4, 5, 4 +; CHECK-NEXT: mulli 4, 4, 176 +; CHECK-NEXT: lwzux 4, 3, 4 +; CHECK-NEXT: stw 4, 744(31) +; CHECK-NEXT: lwz 4, 12(3) +; CHECK-NEXT: stw 4, 756(31) +; CHECK-NEXT: lwz 4, 8(3) +; CHECK-NEXT: stw 4, 752(31) +; CHECK-NEXT: lwz 3, 4(3) +; CHECK-NEXT: stw 3, 748(31) +; CHECK-NEXT: lwz 3, 868(31) +; CHECK-NEXT: lwz 4, 876(31) +; CHECK-NEXT: lwz 5, 764(31) +; CHECK-NEXT: mulli 5, 5, 11 +; CHECK-NEXT: addi 6, 5, 1 +; CHECK-NEXT: mullw 6, 4, 6 +; CHECK-NEXT: slwi 6, 6, 4 +; CHECK-NEXT: evlddx 6, 3, 6 +; CHECK-NEXT: addi 5, 5, 10 +; CHECK-NEXT: mullw 4, 4, 5 +; CHECK-NEXT: slwi 4, 4, 4 +; CHECK-NEXT: evlddx 3, 3, 4 +; CHECK-NEXT: efdadd 3, 6, 3 +; CHECK-NEXT: li 4, 728 +; CHECK-NEXT: evstddx 3, 31, 4 +; CHECK-NEXT: lwz 4, 868(31) +; CHECK-NEXT: lwz 5, 876(31) +; CHECK-NEXT: lwz 3, 764(31) +; CHECK-NEXT: mulli 6, 3, 11 +; CHECK-NEXT: addi 3, 6, 1 +; CHECK-NEXT: mullw 3, 5, 3 +; CHECK-NEXT: slwi 3, 3, 4 +; CHECK-NEXT: add 7, 4, 3 +; CHECK-NEXT: li 3, 8 +; CHECK-NEXT: evlddx 7, 7, 3 +; CHECK-NEXT: addi 6, 6, 10 +; CHECK-NEXT: mullw 5, 5, 6 +; CHECK-NEXT: slwi 5, 5, 4 +; CHECK-NEXT: add 4, 4, 5 +; CHECK-NEXT: evlddx 4, 4, 3 +; CHECK-NEXT: efdadd 4, 7, 4 +; CHECK-NEXT: addi 5, 31, 728 +; CHECK-NEXT: evstddx 4, 5, 3 +; CHECK-NEXT: lwz 4, 868(31) +; CHECK-NEXT: lwz 6, 876(31) +; CHECK-NEXT: lwz 7, 764(31) +; CHECK-NEXT: mulli 7, 7, 11 +; CHECK-NEXT: addi 8, 7, 1 +; CHECK-NEXT: mullw 8, 6, 8 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: evlddx 8, 4, 8 +; CHECK-NEXT: addi 7, 7, 10 +; CHECK-NEXT: mullw 6, 6, 7 +; CHECK-NEXT: slwi 6, 6, 4 +; CHECK-NEXT: evlddx 4, 4, 6 +; CHECK-NEXT: efdsub 4, 8, 4 +; CHECK-NEXT: li 6, 584 +; CHECK-NEXT: evstddx 4, 31, 6 +; CHECK-NEXT: lwz 4, 868(31) +; CHECK-NEXT: lwz 6, 876(31) +; CHECK-NEXT: 
lwz 7, 764(31) +; CHECK-NEXT: mulli 7, 7, 11 +; CHECK-NEXT: addi 8, 7, 1 +; CHECK-NEXT: mullw 8, 6, 8 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: add 8, 4, 8 +; CHECK-NEXT: evlddx 8, 8, 3 +; CHECK-NEXT: addi 7, 7, 10 +; CHECK-NEXT: mullw 6, 6, 7 +; CHECK-NEXT: slwi 6, 6, 4 +; CHECK-NEXT: add 4, 4, 6 +; CHECK-NEXT: evlddx 4, 4, 3 +; CHECK-NEXT: efdsub 6, 8, 4 +; CHECK-NEXT: addi 4, 31, 584 +; CHECK-NEXT: evstddx 6, 4, 3 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 7, 876(31) +; CHECK-NEXT: lwz 8, 764(31) +; CHECK-NEXT: mulli 8, 8, 11 +; CHECK-NEXT: addi 9, 8, 2 +; CHECK-NEXT: mullw 9, 7, 9 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: evlddx 9, 6, 9 +; CHECK-NEXT: addi 8, 8, 9 +; CHECK-NEXT: mullw 7, 7, 8 +; CHECK-NEXT: slwi 7, 7, 4 +; CHECK-NEXT: evlddx 6, 6, 7 +; CHECK-NEXT: efdadd 6, 9, 6 +; CHECK-NEXT: li 7, 712 +; CHECK-NEXT: evstddx 6, 31, 7 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 7, 876(31) +; CHECK-NEXT: lwz 8, 764(31) +; CHECK-NEXT: mulli 8, 8, 11 +; CHECK-NEXT: addi 9, 8, 2 +; CHECK-NEXT: mullw 9, 7, 9 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: add 9, 6, 9 +; CHECK-NEXT: evlddx 9, 9, 3 +; CHECK-NEXT: addi 8, 8, 9 +; CHECK-NEXT: mullw 7, 7, 8 +; CHECK-NEXT: slwi 7, 7, 4 +; CHECK-NEXT: add 6, 6, 7 +; CHECK-NEXT: evlddx 6, 6, 3 +; CHECK-NEXT: efdadd 6, 9, 6 +; CHECK-NEXT: addi 7, 31, 712 +; CHECK-NEXT: evstddx 6, 7, 3 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 8, 876(31) +; CHECK-NEXT: lwz 9, 764(31) +; CHECK-NEXT: mulli 9, 9, 11 +; CHECK-NEXT: addi 10, 9, 2 +; CHECK-NEXT: mullw 10, 8, 10 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: evlddx 10, 6, 10 +; CHECK-NEXT: addi 9, 9, 9 +; CHECK-NEXT: mullw 8, 8, 9 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: evlddx 6, 6, 8 +; CHECK-NEXT: efdsub 6, 10, 6 +; CHECK-NEXT: li 8, 600 +; CHECK-NEXT: evstddx 6, 31, 8 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 8, 876(31) +; CHECK-NEXT: lwz 9, 764(31) +; CHECK-NEXT: mulli 9, 9, 11 +; CHECK-NEXT: addi 10, 9, 2 +; CHECK-NEXT: mullw 10, 8, 10 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: add 10, 6, 10 +; CHECK-NEXT: evlddx 10, 10, 3 +; CHECK-NEXT: addi 9, 9, 9 +; CHECK-NEXT: mullw 8, 8, 9 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: add 6, 6, 8 +; CHECK-NEXT: evlddx 6, 6, 3 +; CHECK-NEXT: efdsub 8, 10, 6 +; CHECK-NEXT: addi 6, 31, 600 +; CHECK-NEXT: evstddx 8, 6, 3 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 9, 876(31) +; CHECK-NEXT: lwz 10, 764(31) +; CHECK-NEXT: mulli 10, 10, 11 +; CHECK-NEXT: addi 11, 10, 3 +; CHECK-NEXT: mullw 11, 9, 11 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: evlddx 11, 8, 11 +; CHECK-NEXT: addi 10, 10, 8 +; CHECK-NEXT: mullw 9, 9, 10 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: evlddx 8, 8, 9 +; CHECK-NEXT: efdadd 8, 11, 8 +; CHECK-NEXT: li 9, 696 +; CHECK-NEXT: evstddx 8, 31, 9 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 9, 876(31) +; CHECK-NEXT: lwz 10, 764(31) +; CHECK-NEXT: mulli 10, 10, 11 +; CHECK-NEXT: addi 11, 10, 3 +; CHECK-NEXT: mullw 11, 9, 11 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: add 11, 8, 11 +; CHECK-NEXT: evlddx 11, 11, 3 +; CHECK-NEXT: addi 10, 10, 8 +; CHECK-NEXT: mullw 9, 9, 10 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: add 8, 8, 9 +; CHECK-NEXT: evlddx 8, 8, 3 +; CHECK-NEXT: efdadd 8, 11, 8 +; CHECK-NEXT: addi 9, 31, 696 +; CHECK-NEXT: evstddx 8, 9, 3 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 10, 876(31) +; CHECK-NEXT: lwz 11, 764(31) +; CHECK-NEXT: mulli 11, 11, 11 +; CHECK-NEXT: addi 12, 11, 3 +; CHECK-NEXT: mullw 12, 10, 12 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: evlddx 12, 8, 12 +; CHECK-NEXT: addi 11, 11, 8 +; 
CHECK-NEXT: mullw 10, 10, 11 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: evlddx 8, 8, 10 +; CHECK-NEXT: efdsub 8, 12, 8 +; CHECK-NEXT: li 10, 616 +; CHECK-NEXT: evstddx 8, 31, 10 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 10, 876(31) +; CHECK-NEXT: lwz 11, 764(31) +; CHECK-NEXT: mulli 11, 11, 11 +; CHECK-NEXT: addi 12, 11, 3 +; CHECK-NEXT: mullw 12, 10, 12 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: add 12, 8, 12 +; CHECK-NEXT: evlddx 12, 12, 3 +; CHECK-NEXT: addi 11, 11, 8 +; CHECK-NEXT: mullw 10, 10, 11 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: add 8, 8, 10 +; CHECK-NEXT: evlddx 8, 8, 3 +; CHECK-NEXT: efdsub 10, 12, 8 +; CHECK-NEXT: addi 8, 31, 616 +; CHECK-NEXT: evstddx 10, 8, 3 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 11, 876(31) +; CHECK-NEXT: lwz 12, 764(31) +; CHECK-NEXT: mulli 12, 12, 11 +; CHECK-NEXT: addi 0, 12, 4 +; CHECK-NEXT: mullw 0, 11, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 0, 10, 0 +; CHECK-NEXT: addi 12, 12, 7 +; CHECK-NEXT: mullw 11, 11, 12 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: evlddx 10, 10, 11 +; CHECK-NEXT: efdadd 10, 0, 10 +; CHECK-NEXT: li 11, 680 +; CHECK-NEXT: evstddx 10, 31, 11 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 11, 876(31) +; CHECK-NEXT: lwz 12, 764(31) +; CHECK-NEXT: mulli 12, 12, 11 +; CHECK-NEXT: addi 0, 12, 4 +; CHECK-NEXT: mullw 0, 11, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 30, 10, 0 +; CHECK-NEXT: evlddx 0, 30, 3 +; CHECK-NEXT: addi 12, 12, 7 +; CHECK-NEXT: mullw 11, 11, 12 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: add 10, 10, 11 +; CHECK-NEXT: evlddx 10, 10, 3 +; CHECK-NEXT: efdadd 10, 0, 10 +; CHECK-NEXT: addi 11, 31, 680 +; CHECK-NEXT: evstddx 10, 11, 3 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 12, 876(31) +; CHECK-NEXT: lwz 0, 764(31) +; CHECK-NEXT: mulli 30, 0, 11 +; CHECK-NEXT: addi 0, 30, 4 +; CHECK-NEXT: mullw 0, 12, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 0, 10, 0 +; CHECK-NEXT: addi 30, 30, 7 +; CHECK-NEXT: mullw 12, 12, 30 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: evlddx 10, 10, 12 +; CHECK-NEXT: efdsub 10, 0, 10 +; CHECK-NEXT: li 12, 632 +; CHECK-NEXT: evstddx 10, 31, 12 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 12, 876(31) +; CHECK-NEXT: lwz 0, 764(31) +; CHECK-NEXT: mulli 30, 0, 11 +; CHECK-NEXT: addi 0, 30, 4 +; CHECK-NEXT: mullw 0, 12, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 29, 10, 0 +; CHECK-NEXT: evlddx 0, 29, 3 +; CHECK-NEXT: addi 30, 30, 7 +; CHECK-NEXT: mullw 12, 12, 30 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: add 10, 10, 12 +; CHECK-NEXT: evlddx 10, 10, 3 +; CHECK-NEXT: efdsub 12, 0, 10 +; CHECK-NEXT: addi 10, 31, 632 +; CHECK-NEXT: evstddx 12, 10, 3 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 876(31) +; CHECK-NEXT: lwz 30, 764(31) +; CHECK-NEXT: mulli 30, 30, 11 +; CHECK-NEXT: addi 29, 30, 5 +; CHECK-NEXT: mullw 29, 0, 29 +; CHECK-NEXT: slwi 29, 29, 4 +; CHECK-NEXT: evlddx 29, 12, 29 +; CHECK-NEXT: addi 30, 30, 6 +; CHECK-NEXT: mullw 0, 0, 30 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 12, 12, 0 +; CHECK-NEXT: efdadd 12, 29, 12 +; CHECK-NEXT: li 30, 664 +; CHECK-NEXT: evstddx 12, 31, 30 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 876(31) +; CHECK-NEXT: lwz 30, 764(31) +; CHECK-NEXT: mulli 30, 30, 11 +; CHECK-NEXT: addi 29, 30, 5 +; CHECK-NEXT: mullw 29, 0, 29 +; CHECK-NEXT: slwi 29, 29, 4 +; CHECK-NEXT: add 29, 12, 29 +; CHECK-NEXT: evlddx 29, 29, 3 +; CHECK-NEXT: addi 30, 30, 6 +; CHECK-NEXT: mullw 0, 0, 30 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 12, 12, 0 +; CHECK-NEXT: evlddx 12, 12, 3 +; 
CHECK-NEXT: efdadd 12, 29, 12 +; CHECK-NEXT: addi 30, 31, 664 +; CHECK-NEXT: evstddx 12, 30, 3 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 876(31) +; CHECK-NEXT: lwz 29, 764(31) +; CHECK-NEXT: mulli 29, 29, 11 +; CHECK-NEXT: addi 28, 29, 5 +; CHECK-NEXT: mullw 28, 0, 28 +; CHECK-NEXT: slwi 28, 28, 4 +; CHECK-NEXT: evlddx 28, 12, 28 +; CHECK-NEXT: addi 29, 29, 6 +; CHECK-NEXT: mullw 0, 0, 29 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 12, 12, 0 +; CHECK-NEXT: efdsub 12, 28, 12 +; CHECK-NEXT: li 29, 648 +; CHECK-NEXT: evstddx 12, 31, 29 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 876(31) +; CHECK-NEXT: lwz 29, 764(31) +; CHECK-NEXT: mulli 29, 29, 11 +; CHECK-NEXT: addi 28, 29, 5 +; CHECK-NEXT: mullw 28, 0, 28 +; CHECK-NEXT: slwi 28, 28, 4 +; CHECK-NEXT: add 28, 12, 28 +; CHECK-NEXT: evlddx 28, 28, 3 +; CHECK-NEXT: addi 29, 29, 6 +; CHECK-NEXT: mullw 0, 0, 29 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 12, 12, 0 +; CHECK-NEXT: evlddx 12, 12, 3 +; CHECK-NEXT: efdsub 0, 28, 12 +; CHECK-NEXT: addi 12, 31, 648 +; CHECK-NEXT: evstddx 0, 12, 3 +; CHECK-NEXT: li 29, 744 +; CHECK-NEXT: evlddx 0, 31, 29 +; CHECK-NEXT: li 29, 728 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 712 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 696 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 680 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 664 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: lwz 29, 864(31) +; CHECK-NEXT: lwz 28, 876(31) +; CHECK-NEXT: lwz 27, 764(31) +; CHECK-NEXT: mullw 28, 28, 27 +; CHECK-NEXT: slwi 28, 28, 4 +; CHECK-NEXT: evstddx 0, 29, 28 +; CHECK-NEXT: addi 29, 31, 744 +; CHECK-NEXT: evlddx 0, 29, 3 +; CHECK-NEXT: evlddx 28, 5, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 7, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 9, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 11, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 30, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: lwz 28, 864(31) +; CHECK-NEXT: lwz 27, 876(31) +; CHECK-NEXT: lwz 26, 764(31) +; CHECK-NEXT: mullw 27, 27, 26 +; CHECK-NEXT: slwi 27, 27, 4 +; CHECK-NEXT: add 28, 28, 27 +; CHECK-NEXT: evstddx 0, 28, 3 +; CHECK-NEXT: li 28, 744 +; CHECK-NEXT: evlddx 28, 31, 28 +; CHECK-NEXT: li 27, 728 +; CHECK-NEXT: evlddx 27, 31, 27 +; CHECK-NEXT: li 0, .LCPI1_5@l +; CHECK-NEXT: lis 26, .LCPI1_5@ha +; CHECK-NEXT: evlddx 0, 26, 0 +; CHECK-NEXT: efdmul 27, 27, 0 +; CHECK-NEXT: efdadd 27, 27, 28 +; CHECK-NEXT: li 28, 712 +; CHECK-NEXT: evlddx 26, 31, 28 +; CHECK-NEXT: li 28, .LCPI1_6@l +; CHECK-NEXT: lis 25, .LCPI1_6@ha +; CHECK-NEXT: evlddx 28, 25, 28 +; CHECK-NEXT: efdmul 26, 26, 28 +; CHECK-NEXT: efdadd 26, 26, 27 +; CHECK-NEXT: li 27, 696 +; CHECK-NEXT: evlddx 25, 31, 27 +; CHECK-NEXT: li 27, .LCPI1_7@l +; CHECK-NEXT: lis 24, .LCPI1_7@ha +; CHECK-NEXT: evlddx 27, 24, 27 +; CHECK-NEXT: efdmul 25, 25, 27 +; CHECK-NEXT: efdadd 26, 25, 26 +; CHECK-NEXT: li 25, 680 +; CHECK-NEXT: evlddx 24, 31, 25 +; CHECK-NEXT: li 25, .LCPI1_8@l +; CHECK-NEXT: lis 23, .LCPI1_8@ha +; CHECK-NEXT: evlddx 25, 23, 25 +; CHECK-NEXT: efdmul 24, 24, 25 +; CHECK-NEXT: efdadd 24, 24, 26 +; CHECK-NEXT: li 26, 664 +; CHECK-NEXT: evlddx 23, 31, 26 +; CHECK-NEXT: li 26, .LCPI1_9@l +; CHECK-NEXT: lis 22, .LCPI1_9@ha +; CHECK-NEXT: evlddx 26, 22, 26 +; CHECK-NEXT: efdmul 23, 23, 26 +; CHECK-NEXT: efdadd 24, 23, 24 +; 
CHECK-NEXT: li 23, 568 +; CHECK-NEXT: evstddx 24, 31, 23 +; CHECK-NEXT: evlddx 24, 29, 3 +; CHECK-NEXT: evlddx 23, 5, 3 +; CHECK-NEXT: efdmul 23, 23, 0 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 7, 3 +; CHECK-NEXT: efdmul 23, 23, 28 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 9, 3 +; CHECK-NEXT: efdmul 23, 23, 27 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 11, 3 +; CHECK-NEXT: efdmul 23, 23, 25 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 30, 3 +; CHECK-NEXT: efdmul 23, 23, 26 +; CHECK-NEXT: efdadd 23, 23, 24 +; CHECK-NEXT: addi 24, 31, 568 +; CHECK-NEXT: evstddx 23, 24, 3 +; CHECK-NEXT: li 23, 832 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: li 22, 584 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 816 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: li 20, 600 +; CHECK-NEXT: evlddx 20, 31, 20 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdmul 23, 23, 22 +; CHECK-NEXT: efdadd 23, 23, 21 +; CHECK-NEXT: li 22, 800 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 616 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdadd 23, 22, 23 +; CHECK-NEXT: li 22, 784 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 632 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdadd 23, 22, 23 +; CHECK-NEXT: li 22, 768 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 648 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdadd 22, 22, 23 +; CHECK-NEXT: addi 23, 31, 552 +; CHECK-NEXT: evstddx 22, 23, 3 +; CHECK-NEXT: li 22, 832 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: evlddx 21, 4, 3 +; CHECK-NEXT: li 20, 816 +; CHECK-NEXT: evlddx 20, 31, 20 +; CHECK-NEXT: evlddx 19, 6, 3 +; CHECK-NEXT: efdmul 20, 20, 19 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdadd 22, 22, 20 +; CHECK-NEXT: li 21, 800 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 8, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdadd 22, 21, 22 +; CHECK-NEXT: li 21, 784 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 10, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdadd 22, 21, 22 +; CHECK-NEXT: li 21, 768 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 12, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdadd 22, 21, 22 +; CHECK-NEXT: efdneg 22, 22 +; CHECK-NEXT: li 21, 552 +; CHECK-NEXT: evstddx 22, 31, 21 +; CHECK-NEXT: li 22, 568 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 552 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdadd 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: evstddx 22, 21, 20 +; CHECK-NEXT: evlddx 22, 24, 3 +; CHECK-NEXT: evlddx 21, 23, 3 +; CHECK-NEXT: efdadd 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: add 21, 21, 20 +; CHECK-NEXT: evstddx 22, 21, 3 +; CHECK-NEXT: li 22, 568 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 552 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdsub 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: mulli 18, 18, 10 +; CHECK-NEXT: add 19, 19, 18 +; 
CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: evstddx 22, 21, 20 +; CHECK-NEXT: evlddx 24, 24, 3 +; CHECK-NEXT: evlddx 23, 23, 3 +; CHECK-NEXT: efdsub 24, 24, 23 +; CHECK-NEXT: lwz 23, 864(31) +; CHECK-NEXT: lwz 22, 876(31) +; CHECK-NEXT: lwz 21, 764(31) +; CHECK-NEXT: lwz 20, 872(31) +; CHECK-NEXT: mulli 20, 20, 10 +; CHECK-NEXT: add 21, 21, 20 +; CHECK-NEXT: mullw 22, 22, 21 +; CHECK-NEXT: slwi 22, 22, 4 +; CHECK-NEXT: add 23, 23, 22 +; CHECK-NEXT: evstddx 24, 23, 3 +; CHECK-NEXT: li 24, 744 +; CHECK-NEXT: evlddx 24, 31, 24 +; CHECK-NEXT: li 23, 728 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 28 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 712 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 25 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 696 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 26 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 680 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 27 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 664 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 0 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 536 +; CHECK-NEXT: evstddx 24, 31, 23 +; CHECK-NEXT: evlddx 24, 29, 3 +; CHECK-NEXT: evlddx 23, 5, 3 +; CHECK-NEXT: efdmul 23, 23, 28 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 7, 3 +; CHECK-NEXT: efdmul 23, 23, 25 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 9, 3 +; CHECK-NEXT: efdmul 23, 23, 26 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 11, 3 +; CHECK-NEXT: efdmul 23, 23, 27 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 30, 3 +; CHECK-NEXT: efdmul 23, 23, 0 +; CHECK-NEXT: efdadd 23, 23, 24 +; CHECK-NEXT: addi 24, 31, 536 +; CHECK-NEXT: evstddx 23, 24, 3 +; CHECK-NEXT: li 23, 816 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: li 22, 584 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 784 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: li 20, 600 +; CHECK-NEXT: evlddx 20, 31, 20 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdmul 23, 23, 22 +; CHECK-NEXT: efdadd 23, 23, 21 +; CHECK-NEXT: li 22, 768 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 616 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdsub 23, 23, 22 +; CHECK-NEXT: li 22, 800 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 632 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdsub 23, 23, 22 +; CHECK-NEXT: li 22, 832 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 648 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdsub 22, 23, 22 +; CHECK-NEXT: addi 23, 31, 520 +; CHECK-NEXT: evstddx 22, 23, 3 +; CHECK-NEXT: li 22, 816 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: evlddx 21, 4, 3 +; CHECK-NEXT: li 20, 784 +; CHECK-NEXT: evlddx 20, 31, 20 +; CHECK-NEXT: evlddx 19, 6, 3 +; CHECK-NEXT: efdmul 20, 20, 19 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdadd 22, 22, 20 +; CHECK-NEXT: li 21, 768 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 8, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdsub 22, 22, 21 +; CHECK-NEXT: li 21, 800 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 10, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdsub 22, 22, 21 +; CHECK-NEXT: li 21, 832 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 12, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdsub 22, 22, 21 +; CHECK-NEXT: efdneg 22, 
22 +; CHECK-NEXT: li 21, 520 +; CHECK-NEXT: evstddx 22, 31, 21 +; CHECK-NEXT: li 22, 536 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 520 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdadd 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: slwi 18, 18, 1 +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: evstddx 22, 21, 20 +; CHECK-NEXT: evlddx 22, 24, 3 +; CHECK-NEXT: evlddx 21, 23, 3 +; CHECK-NEXT: efdadd 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: slwi 18, 18, 1 +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: add 21, 21, 20 +; CHECK-NEXT: evstddx 22, 21, 3 +; CHECK-NEXT: li 22, 536 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 520 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdsub 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: mulli 18, 18, 9 +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: evstddx 22, 21, 20 +; CHECK-NEXT: evlddx 24, 24, 3 +; CHECK-NEXT: evlddx 23, 23, 3 +; CHECK-NEXT: efdsub 24, 24, 23 +; CHECK-NEXT: lwz 23, 864(31) +; CHECK-NEXT: lwz 22, 876(31) +; CHECK-NEXT: lwz 21, 764(31) +; CHECK-NEXT: lwz 20, 872(31) +; CHECK-NEXT: mulli 20, 20, 9 +; CHECK-NEXT: add 21, 21, 20 +; CHECK-NEXT: mullw 22, 22, 21 +; CHECK-NEXT: slwi 22, 22, 4 +; CHECK-NEXT: add 23, 23, 22 +; CHECK-NEXT: evstddx 24, 23, 3 +; CHECK-NEXT: li 24, 744 +; CHECK-NEXT: evlddx 24, 31, 24 +; CHECK-NEXT: li 23, 728 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 27 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 712 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 26 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 696 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 28 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 680 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 0 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 664 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 25 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 504 +; CHECK-NEXT: evstddx 24, 31, 23 +; CHECK-NEXT: evlddx 24, 29, 3 +; CHECK-NEXT: evlddx 23, 5, 3 +; CHECK-NEXT: efdmul 23, 23, 27 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 7, 3 +; CHECK-NEXT: efdmul 23, 23, 26 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 9, 3 +; CHECK-NEXT: efdmul 23, 23, 28 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 11, 3 +; CHECK-NEXT: efdmul 23, 23, 0 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: evlddx 23, 30, 3 +; CHECK-NEXT: efdmul 23, 23, 25 +; CHECK-NEXT: efdadd 23, 23, 24 +; CHECK-NEXT: addi 24, 31, 504 +; CHECK-NEXT: evstddx 23, 24, 3 +; CHECK-NEXT: li 23, 800 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: li 22, 584 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 768 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: li 20, 600 +; CHECK-NEXT: evlddx 20, 31, 20 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdmul 23, 23, 22 +; CHECK-NEXT: efdsub 23, 23, 21 +; CHECK-NEXT: li 22, 816 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 616 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; 
CHECK-NEXT: efdsub 23, 23, 22 +; CHECK-NEXT: li 22, 832 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 632 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdadd 23, 22, 23 +; CHECK-NEXT: li 22, 784 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 648 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdadd 22, 22, 23 +; CHECK-NEXT: addi 23, 31, 488 +; CHECK-NEXT: evstddx 22, 23, 3 +; CHECK-NEXT: li 22, 800 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: evlddx 21, 4, 3 +; CHECK-NEXT: li 20, 768 +; CHECK-NEXT: evlddx 20, 31, 20 +; CHECK-NEXT: evlddx 19, 6, 3 +; CHECK-NEXT: efdmul 20, 20, 19 +; CHECK-NEXT: efdmul 22, 22, 21 +; CHECK-NEXT: efdsub 22, 22, 20 +; CHECK-NEXT: li 21, 816 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 8, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdsub 22, 22, 21 +; CHECK-NEXT: li 21, 832 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 10, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdadd 22, 21, 22 +; CHECK-NEXT: li 21, 784 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: evlddx 20, 12, 3 +; CHECK-NEXT: efdmul 21, 21, 20 +; CHECK-NEXT: efdadd 22, 21, 22 +; CHECK-NEXT: efdneg 22, 22 +; CHECK-NEXT: li 21, 488 +; CHECK-NEXT: evstddx 22, 31, 21 +; CHECK-NEXT: li 22, 504 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 488 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdadd 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: mulli 18, 18, 3 +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: evstddx 22, 21, 20 +; CHECK-NEXT: evlddx 22, 24, 3 +; CHECK-NEXT: evlddx 21, 23, 3 +; CHECK-NEXT: efdadd 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: mulli 18, 18, 3 +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: add 21, 21, 20 +; CHECK-NEXT: evstddx 22, 21, 3 +; CHECK-NEXT: li 22, 504 +; CHECK-NEXT: evlddx 22, 31, 22 +; CHECK-NEXT: li 21, 488 +; CHECK-NEXT: evlddx 21, 31, 21 +; CHECK-NEXT: efdsub 22, 22, 21 +; CHECK-NEXT: lwz 21, 864(31) +; CHECK-NEXT: lwz 20, 876(31) +; CHECK-NEXT: lwz 19, 764(31) +; CHECK-NEXT: lwz 18, 872(31) +; CHECK-NEXT: slwi 18, 18, 3 +; CHECK-NEXT: add 19, 19, 18 +; CHECK-NEXT: mullw 20, 20, 19 +; CHECK-NEXT: slwi 20, 20, 4 +; CHECK-NEXT: evstddx 22, 21, 20 +; CHECK-NEXT: evlddx 24, 24, 3 +; CHECK-NEXT: evlddx 23, 23, 3 +; CHECK-NEXT: efdsub 24, 24, 23 +; CHECK-NEXT: lwz 23, 864(31) +; CHECK-NEXT: lwz 22, 876(31) +; CHECK-NEXT: lwz 21, 764(31) +; CHECK-NEXT: lwz 20, 872(31) +; CHECK-NEXT: slwi 20, 20, 3 +; CHECK-NEXT: add 21, 21, 20 +; CHECK-NEXT: mullw 22, 22, 21 +; CHECK-NEXT: slwi 22, 22, 4 +; CHECK-NEXT: add 23, 23, 22 +; CHECK-NEXT: evstddx 24, 23, 3 +; CHECK-NEXT: li 24, 744 +; CHECK-NEXT: evlddx 24, 31, 24 +; CHECK-NEXT: li 23, 728 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 25 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 712 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 27 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 696 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 0 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 680 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 26 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 
23, 664 +; CHECK-NEXT: evlddx 23, 31, 23 +; CHECK-NEXT: efdmul 23, 23, 28 +; CHECK-NEXT: efdadd 24, 23, 24 +; CHECK-NEXT: li 23, 472 +; CHECK-NEXT: evstddx 24, 31, 23 +; CHECK-NEXT: evlddx 29, 29, 3 +; CHECK-NEXT: evlddx 5, 5, 3 +; CHECK-NEXT: efdmul 5, 5, 25 +; CHECK-NEXT: efdadd 5, 5, 29 +; CHECK-NEXT: evlddx 7, 7, 3 +; CHECK-NEXT: efdmul 7, 7, 27 +; CHECK-NEXT: efdadd 5, 7, 5 +; CHECK-NEXT: evlddx 7, 9, 3 +; CHECK-NEXT: efdmul 7, 7, 0 +; CHECK-NEXT: efdadd 5, 7, 5 +; CHECK-NEXT: evlddx 7, 11, 3 +; CHECK-NEXT: efdmul 7, 7, 26 +; CHECK-NEXT: efdadd 5, 7, 5 +; CHECK-NEXT: evlddx 7, 30, 3 +; CHECK-NEXT: efdmul 7, 7, 28 +; CHECK-NEXT: efdadd 7, 7, 5 +; CHECK-NEXT: addi 5, 31, 472 +; CHECK-NEXT: evstddx 7, 5, 3 +; CHECK-NEXT: li 7, 784 +; CHECK-NEXT: evlddx 7, 31, 7 +; CHECK-NEXT: li 9, 584 +; CHECK-NEXT: evlddx 9, 31, 9 +; CHECK-NEXT: li 11, 800 +; CHECK-NEXT: evlddx 11, 31, 11 +; CHECK-NEXT: li 30, 600 +; CHECK-NEXT: evlddx 0, 31, 30 +; CHECK-NEXT: efdmul 11, 11, 0 +; CHECK-NEXT: efdmul 7, 7, 9 +; CHECK-NEXT: efdsub 7, 7, 11 +; CHECK-NEXT: li 9, 832 +; CHECK-NEXT: evlddx 9, 31, 9 +; CHECK-NEXT: li 11, 616 +; CHECK-NEXT: evlddx 11, 31, 11 +; CHECK-NEXT: efdmul 9, 9, 11 +; CHECK-NEXT: efdadd 7, 9, 7 +; CHECK-NEXT: li 9, 768 +; CHECK-NEXT: evlddx 9, 31, 9 +; CHECK-NEXT: li 11, 632 +; CHECK-NEXT: evlddx 11, 31, 11 +; CHECK-NEXT: efdmul 9, 9, 11 +; CHECK-NEXT: efdadd 7, 9, 7 +; CHECK-NEXT: li 9, 816 +; CHECK-NEXT: evlddx 9, 31, 9 +; CHECK-NEXT: li 11, 648 +; CHECK-NEXT: evlddx 11, 31, 11 +; CHECK-NEXT: efdmul 9, 9, 11 +; CHECK-NEXT: efdsub 9, 7, 9 +; CHECK-NEXT: addi 7, 31, 456 +; CHECK-NEXT: evstddx 9, 7, 3 +; CHECK-NEXT: li 9, 784 +; CHECK-NEXT: evlddx 9, 31, 9 +; CHECK-NEXT: evlddx 4, 4, 3 +; CHECK-NEXT: li 11, 800 +; CHECK-NEXT: evlddx 11, 31, 11 +; CHECK-NEXT: evlddx 6, 6, 3 +; CHECK-NEXT: efdmul 6, 11, 6 +; CHECK-NEXT: efdmul 4, 9, 4 +; CHECK-NEXT: efdsub 4, 4, 6 +; CHECK-NEXT: li 6, 832 +; CHECK-NEXT: evlddx 6, 31, 6 +; CHECK-NEXT: evlddx 8, 8, 3 +; CHECK-NEXT: efdmul 6, 6, 8 +; CHECK-NEXT: efdadd 4, 6, 4 +; CHECK-NEXT: li 6, 768 +; CHECK-NEXT: evlddx 6, 31, 6 +; CHECK-NEXT: evlddx 8, 10, 3 +; CHECK-NEXT: efdmul 6, 6, 8 +; CHECK-NEXT: efdadd 4, 6, 4 +; CHECK-NEXT: li 6, 816 +; CHECK-NEXT: evlddx 6, 31, 6 +; CHECK-NEXT: evlddx 8, 12, 3 +; CHECK-NEXT: efdmul 6, 6, 8 +; CHECK-NEXT: efdsub 4, 4, 6 +; CHECK-NEXT: efdneg 4, 4 +; CHECK-NEXT: li 6, 456 +; CHECK-NEXT: evstddx 4, 31, 6 +; CHECK-NEXT: li 4, 472 +; CHECK-NEXT: evlddx 4, 31, 4 +; CHECK-NEXT: li 6, 456 +; CHECK-NEXT: evlddx 6, 31, 6 +; CHECK-NEXT: efdadd 4, 4, 6 +; CHECK-NEXT: lwz 6, 864(31) +; CHECK-NEXT: lwz 8, 876(31) +; CHECK-NEXT: lwz 9, 764(31) +; CHECK-NEXT: lwz 10, 872(31) +; CHECK-NEXT: slwi 10, 10, 2 +; CHECK-NEXT: add 9, 9, 10 +; CHECK-NEXT: mullw 8, 8, 9 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: evstddx 4, 6, 8 +; CHECK-NEXT: evlddx 4, 5, 3 +; CHECK-NEXT: evlddx 6, 7, 3 +; CHECK-NEXT: efdadd 4, 4, 6 +; CHECK-NEXT: lwz 6, 864(31) +; CHECK-NEXT: lwz 8, 876(31) +; CHECK-NEXT: lwz 9, 764(31) +; CHECK-NEXT: lwz 10, 872(31) +; CHECK-NEXT: slwi 10, 10, 2 +; CHECK-NEXT: add 9, 9, 10 +; CHECK-NEXT: mullw 8, 8, 9 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: add 6, 6, 8 +; CHECK-NEXT: evstddx 4, 6, 3 +; CHECK-NEXT: li 4, 472 +; CHECK-NEXT: evlddx 4, 31, 4 +; CHECK-NEXT: li 6, 456 +; CHECK-NEXT: evlddx 6, 31, 6 +; CHECK-NEXT: efdsub 4, 4, 6 +; CHECK-NEXT: lwz 6, 864(31) +; CHECK-NEXT: lwz 8, 876(31) +; CHECK-NEXT: lwz 9, 764(31) +; CHECK-NEXT: lwz 10, 872(31) +; CHECK-NEXT: mulli 10, 10, 7 +; CHECK-NEXT: add 9, 9, 10 +; CHECK-NEXT: 
mullw 8, 8, 9 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: evstddx 4, 6, 8 +; CHECK-NEXT: evlddx 4, 5, 3 +; CHECK-NEXT: evlddx 5, 7, 3 +; CHECK-NEXT: efdsub 4, 4, 5 +; CHECK-NEXT: lwz 5, 864(31) +; CHECK-NEXT: lwz 6, 876(31) +; CHECK-NEXT: lwz 7, 764(31) +; CHECK-NEXT: lwz 8, 872(31) +; CHECK-NEXT: mulli 8, 8, 7 +; CHECK-NEXT: add 7, 7, 8 +; CHECK-NEXT: mullw 6, 6, 7 +; CHECK-NEXT: slwi 6, 6, 4 +; CHECK-NEXT: add 5, 5, 6 +; CHECK-NEXT: evstddx 4, 5, 3 +; CHECK-NEXT: li 3, 1 +; CHECK-NEXT: stw 3, 452(31) +; CHECK-NEXT: b .LBB1_3 +; CHECK-NEXT: .LBB1_3: # Parent Loop BB1_1 Depth=1 +; CHECK-NEXT: # => This Inner Loop Header: Depth=2 +; CHECK-NEXT: lwz 3, 452(31) +; CHECK-NEXT: lwz 4, 876(31) +; CHECK-NEXT: cmplw 3, 4 +; CHECK-NEXT: bge 0, .LBB1_6 +; CHECK-NEXT: b .LBB1_4 +; CHECK-NEXT: .LBB1_4: +; CHECK-NEXT: lwz 3, 868(31) +; CHECK-NEXT: lwz 4, 452(31) +; CHECK-NEXT: lwz 5, 876(31) +; CHECK-NEXT: lwz 6, 764(31) +; CHECK-NEXT: mullw 5, 6, 5 +; CHECK-NEXT: mulli 5, 5, 11 +; CHECK-NEXT: add 4, 4, 5 +; CHECK-NEXT: slwi 4, 4, 4 +; CHECK-NEXT: lwzux 4, 3, 4 +; CHECK-NEXT: stw 4, 432(31) +; CHECK-NEXT: lwz 4, 12(3) +; CHECK-NEXT: stw 4, 444(31) +; CHECK-NEXT: lwz 4, 8(3) +; CHECK-NEXT: stw 4, 440(31) +; CHECK-NEXT: lwz 3, 4(3) +; CHECK-NEXT: stw 3, 436(31) +; CHECK-NEXT: lwz 3, 868(31) +; CHECK-NEXT: lwz 4, 452(31) +; CHECK-NEXT: lwz 5, 876(31) +; CHECK-NEXT: lwz 6, 764(31) +; CHECK-NEXT: mulli 6, 6, 11 +; CHECK-NEXT: addi 7, 6, 1 +; CHECK-NEXT: mullw 7, 5, 7 +; CHECK-NEXT: add 7, 4, 7 +; CHECK-NEXT: slwi 7, 7, 4 +; CHECK-NEXT: evlddx 7, 3, 7 +; CHECK-NEXT: addi 6, 6, 10 +; CHECK-NEXT: mullw 5, 5, 6 +; CHECK-NEXT: add 4, 4, 5 +; CHECK-NEXT: slwi 4, 4, 4 +; CHECK-NEXT: evlddx 3, 3, 4 +; CHECK-NEXT: efdadd 3, 7, 3 +; CHECK-NEXT: li 4, 416 +; CHECK-NEXT: evstddx 3, 31, 4 +; CHECK-NEXT: lwz 4, 868(31) +; CHECK-NEXT: lwz 5, 452(31) +; CHECK-NEXT: lwz 6, 876(31) +; CHECK-NEXT: lwz 3, 764(31) +; CHECK-NEXT: mulli 7, 3, 11 +; CHECK-NEXT: addi 3, 7, 1 +; CHECK-NEXT: mullw 3, 6, 3 +; CHECK-NEXT: add 3, 5, 3 +; CHECK-NEXT: slwi 3, 3, 4 +; CHECK-NEXT: add 8, 4, 3 +; CHECK-NEXT: li 3, 8 +; CHECK-NEXT: evlddx 8, 8, 3 +; CHECK-NEXT: addi 7, 7, 10 +; CHECK-NEXT: mullw 6, 6, 7 +; CHECK-NEXT: add 5, 5, 6 +; CHECK-NEXT: slwi 5, 5, 4 +; CHECK-NEXT: add 4, 4, 5 +; CHECK-NEXT: evlddx 4, 4, 3 +; CHECK-NEXT: efdadd 4, 8, 4 +; CHECK-NEXT: addi 5, 31, 416 +; CHECK-NEXT: evstddx 4, 5, 3 +; CHECK-NEXT: lwz 4, 868(31) +; CHECK-NEXT: lwz 6, 452(31) +; CHECK-NEXT: lwz 7, 876(31) +; CHECK-NEXT: lwz 8, 764(31) +; CHECK-NEXT: mulli 8, 8, 11 +; CHECK-NEXT: addi 9, 8, 1 +; CHECK-NEXT: mullw 9, 7, 9 +; CHECK-NEXT: add 9, 6, 9 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: evlddx 9, 4, 9 +; CHECK-NEXT: addi 8, 8, 10 +; CHECK-NEXT: mullw 7, 7, 8 +; CHECK-NEXT: add 6, 6, 7 +; CHECK-NEXT: slwi 6, 6, 4 +; CHECK-NEXT: evlddx 4, 4, 6 +; CHECK-NEXT: efdsub 4, 9, 4 +; CHECK-NEXT: li 6, 272 +; CHECK-NEXT: evstddx 4, 31, 6 +; CHECK-NEXT: lwz 4, 868(31) +; CHECK-NEXT: lwz 6, 452(31) +; CHECK-NEXT: lwz 7, 876(31) +; CHECK-NEXT: lwz 8, 764(31) +; CHECK-NEXT: mulli 8, 8, 11 +; CHECK-NEXT: addi 9, 8, 1 +; CHECK-NEXT: mullw 9, 7, 9 +; CHECK-NEXT: add 9, 6, 9 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: add 9, 4, 9 +; CHECK-NEXT: evlddx 9, 9, 3 +; CHECK-NEXT: addi 8, 8, 10 +; CHECK-NEXT: mullw 7, 7, 8 +; CHECK-NEXT: add 6, 6, 7 +; CHECK-NEXT: slwi 6, 6, 4 +; CHECK-NEXT: add 4, 4, 6 +; CHECK-NEXT: evlddx 4, 4, 3 +; CHECK-NEXT: efdsub 6, 9, 4 +; CHECK-NEXT: addi 4, 31, 272 +; CHECK-NEXT: evstddx 6, 4, 3 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 7, 452(31) 
+; CHECK-NEXT: lwz 8, 876(31) +; CHECK-NEXT: lwz 9, 764(31) +; CHECK-NEXT: mulli 9, 9, 11 +; CHECK-NEXT: addi 10, 9, 2 +; CHECK-NEXT: mullw 10, 8, 10 +; CHECK-NEXT: add 10, 7, 10 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: evlddx 10, 6, 10 +; CHECK-NEXT: addi 9, 9, 9 +; CHECK-NEXT: mullw 8, 8, 9 +; CHECK-NEXT: add 7, 7, 8 +; CHECK-NEXT: slwi 7, 7, 4 +; CHECK-NEXT: evlddx 6, 6, 7 +; CHECK-NEXT: efdadd 6, 10, 6 +; CHECK-NEXT: li 7, 400 +; CHECK-NEXT: evstddx 6, 31, 7 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 7, 452(31) +; CHECK-NEXT: lwz 8, 876(31) +; CHECK-NEXT: lwz 9, 764(31) +; CHECK-NEXT: mulli 9, 9, 11 +; CHECK-NEXT: addi 10, 9, 2 +; CHECK-NEXT: mullw 10, 8, 10 +; CHECK-NEXT: add 10, 7, 10 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: add 10, 6, 10 +; CHECK-NEXT: evlddx 10, 10, 3 +; CHECK-NEXT: addi 9, 9, 9 +; CHECK-NEXT: mullw 8, 8, 9 +; CHECK-NEXT: add 7, 7, 8 +; CHECK-NEXT: slwi 7, 7, 4 +; CHECK-NEXT: add 6, 6, 7 +; CHECK-NEXT: evlddx 6, 6, 3 +; CHECK-NEXT: efdadd 6, 10, 6 +; CHECK-NEXT: addi 7, 31, 400 +; CHECK-NEXT: evstddx 6, 7, 3 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 8, 452(31) +; CHECK-NEXT: lwz 9, 876(31) +; CHECK-NEXT: lwz 10, 764(31) +; CHECK-NEXT: mulli 10, 10, 11 +; CHECK-NEXT: addi 11, 10, 2 +; CHECK-NEXT: mullw 11, 9, 11 +; CHECK-NEXT: add 11, 8, 11 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: evlddx 11, 6, 11 +; CHECK-NEXT: addi 10, 10, 9 +; CHECK-NEXT: mullw 9, 9, 10 +; CHECK-NEXT: add 8, 8, 9 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: evlddx 6, 6, 8 +; CHECK-NEXT: efdsub 6, 11, 6 +; CHECK-NEXT: li 8, 288 +; CHECK-NEXT: evstddx 6, 31, 8 +; CHECK-NEXT: lwz 6, 868(31) +; CHECK-NEXT: lwz 8, 452(31) +; CHECK-NEXT: lwz 9, 876(31) +; CHECK-NEXT: lwz 10, 764(31) +; CHECK-NEXT: mulli 10, 10, 11 +; CHECK-NEXT: addi 11, 10, 2 +; CHECK-NEXT: mullw 11, 9, 11 +; CHECK-NEXT: add 11, 8, 11 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: add 11, 6, 11 +; CHECK-NEXT: evlddx 11, 11, 3 +; CHECK-NEXT: addi 10, 10, 9 +; CHECK-NEXT: mullw 9, 9, 10 +; CHECK-NEXT: add 8, 8, 9 +; CHECK-NEXT: slwi 8, 8, 4 +; CHECK-NEXT: add 6, 6, 8 +; CHECK-NEXT: evlddx 6, 6, 3 +; CHECK-NEXT: efdsub 8, 11, 6 +; CHECK-NEXT: addi 6, 31, 288 +; CHECK-NEXT: evstddx 8, 6, 3 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 9, 452(31) +; CHECK-NEXT: lwz 10, 876(31) +; CHECK-NEXT: lwz 11, 764(31) +; CHECK-NEXT: mulli 11, 11, 11 +; CHECK-NEXT: addi 12, 11, 3 +; CHECK-NEXT: mullw 12, 10, 12 +; CHECK-NEXT: add 12, 9, 12 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: evlddx 12, 8, 12 +; CHECK-NEXT: addi 11, 11, 8 +; CHECK-NEXT: mullw 10, 10, 11 +; CHECK-NEXT: add 9, 9, 10 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: evlddx 8, 8, 9 +; CHECK-NEXT: efdadd 8, 12, 8 +; CHECK-NEXT: li 9, 384 +; CHECK-NEXT: evstddx 8, 31, 9 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 9, 452(31) +; CHECK-NEXT: lwz 10, 876(31) +; CHECK-NEXT: lwz 11, 764(31) +; CHECK-NEXT: mulli 11, 11, 11 +; CHECK-NEXT: addi 12, 11, 3 +; CHECK-NEXT: mullw 12, 10, 12 +; CHECK-NEXT: add 12, 9, 12 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: add 12, 8, 12 +; CHECK-NEXT: evlddx 12, 12, 3 +; CHECK-NEXT: addi 11, 11, 8 +; CHECK-NEXT: mullw 10, 10, 11 +; CHECK-NEXT: add 9, 9, 10 +; CHECK-NEXT: slwi 9, 9, 4 +; CHECK-NEXT: add 8, 8, 9 +; CHECK-NEXT: evlddx 8, 8, 3 +; CHECK-NEXT: efdadd 8, 12, 8 +; CHECK-NEXT: addi 9, 31, 384 +; CHECK-NEXT: evstddx 8, 9, 3 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 10, 452(31) +; CHECK-NEXT: lwz 11, 876(31) +; CHECK-NEXT: lwz 12, 764(31) +; CHECK-NEXT: mulli 12, 12, 11 +; CHECK-NEXT: addi 0, 12, 3 +; CHECK-NEXT: mullw 0, 
11, 0 +; CHECK-NEXT: add 0, 10, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 0, 8, 0 +; CHECK-NEXT: addi 12, 12, 8 +; CHECK-NEXT: mullw 11, 11, 12 +; CHECK-NEXT: add 10, 10, 11 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: evlddx 8, 8, 10 +; CHECK-NEXT: efdsub 8, 0, 8 +; CHECK-NEXT: li 10, 304 +; CHECK-NEXT: evstddx 8, 31, 10 +; CHECK-NEXT: lwz 8, 868(31) +; CHECK-NEXT: lwz 10, 452(31) +; CHECK-NEXT: lwz 11, 876(31) +; CHECK-NEXT: lwz 12, 764(31) +; CHECK-NEXT: mulli 12, 12, 11 +; CHECK-NEXT: addi 0, 12, 3 +; CHECK-NEXT: mullw 0, 11, 0 +; CHECK-NEXT: add 0, 10, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 30, 8, 0 +; CHECK-NEXT: evlddx 0, 30, 3 +; CHECK-NEXT: addi 12, 12, 8 +; CHECK-NEXT: mullw 11, 11, 12 +; CHECK-NEXT: add 10, 10, 11 +; CHECK-NEXT: slwi 10, 10, 4 +; CHECK-NEXT: add 8, 8, 10 +; CHECK-NEXT: evlddx 8, 8, 3 +; CHECK-NEXT: efdsub 10, 0, 8 +; CHECK-NEXT: addi 8, 31, 304 +; CHECK-NEXT: evstddx 10, 8, 3 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 11, 452(31) +; CHECK-NEXT: lwz 12, 876(31) +; CHECK-NEXT: lwz 0, 764(31) +; CHECK-NEXT: mulli 30, 0, 11 +; CHECK-NEXT: addi 0, 30, 4 +; CHECK-NEXT: mullw 0, 12, 0 +; CHECK-NEXT: add 0, 11, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 0, 10, 0 +; CHECK-NEXT: addi 30, 30, 7 +; CHECK-NEXT: mullw 12, 12, 30 +; CHECK-NEXT: add 11, 11, 12 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: evlddx 10, 10, 11 +; CHECK-NEXT: efdadd 10, 0, 10 +; CHECK-NEXT: li 11, 368 +; CHECK-NEXT: evstddx 10, 31, 11 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 11, 452(31) +; CHECK-NEXT: lwz 12, 876(31) +; CHECK-NEXT: lwz 0, 764(31) +; CHECK-NEXT: mulli 30, 0, 11 +; CHECK-NEXT: addi 0, 30, 4 +; CHECK-NEXT: mullw 0, 12, 0 +; CHECK-NEXT: add 0, 11, 0 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 29, 10, 0 +; CHECK-NEXT: evlddx 0, 29, 3 +; CHECK-NEXT: addi 30, 30, 7 +; CHECK-NEXT: mullw 12, 12, 30 +; CHECK-NEXT: add 11, 11, 12 +; CHECK-NEXT: slwi 11, 11, 4 +; CHECK-NEXT: add 10, 10, 11 +; CHECK-NEXT: evlddx 10, 10, 3 +; CHECK-NEXT: efdadd 10, 0, 10 +; CHECK-NEXT: addi 11, 31, 368 +; CHECK-NEXT: evstddx 10, 11, 3 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 12, 452(31) +; CHECK-NEXT: lwz 0, 876(31) +; CHECK-NEXT: lwz 30, 764(31) +; CHECK-NEXT: mulli 30, 30, 11 +; CHECK-NEXT: addi 29, 30, 4 +; CHECK-NEXT: mullw 29, 0, 29 +; CHECK-NEXT: add 29, 12, 29 +; CHECK-NEXT: slwi 29, 29, 4 +; CHECK-NEXT: evlddx 29, 10, 29 +; CHECK-NEXT: addi 30, 30, 7 +; CHECK-NEXT: mullw 0, 0, 30 +; CHECK-NEXT: add 12, 12, 0 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: evlddx 10, 10, 12 +; CHECK-NEXT: efdsub 10, 29, 10 +; CHECK-NEXT: li 12, 320 +; CHECK-NEXT: evstddx 10, 31, 12 +; CHECK-NEXT: lwz 10, 868(31) +; CHECK-NEXT: lwz 12, 452(31) +; CHECK-NEXT: lwz 0, 876(31) +; CHECK-NEXT: lwz 30, 764(31) +; CHECK-NEXT: mulli 30, 30, 11 +; CHECK-NEXT: addi 29, 30, 4 +; CHECK-NEXT: mullw 29, 0, 29 +; CHECK-NEXT: add 29, 12, 29 +; CHECK-NEXT: slwi 29, 29, 4 +; CHECK-NEXT: add 29, 10, 29 +; CHECK-NEXT: evlddx 29, 29, 3 +; CHECK-NEXT: addi 30, 30, 7 +; CHECK-NEXT: mullw 0, 0, 30 +; CHECK-NEXT: add 12, 12, 0 +; CHECK-NEXT: slwi 12, 12, 4 +; CHECK-NEXT: add 10, 10, 12 +; CHECK-NEXT: evlddx 10, 10, 3 +; CHECK-NEXT: efdsub 12, 29, 10 +; CHECK-NEXT: addi 10, 31, 320 +; CHECK-NEXT: evstddx 12, 10, 3 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 452(31) +; CHECK-NEXT: lwz 30, 876(31) +; CHECK-NEXT: lwz 29, 764(31) +; CHECK-NEXT: mulli 29, 29, 11 +; CHECK-NEXT: addi 28, 29, 5 +; CHECK-NEXT: mullw 28, 30, 28 +; CHECK-NEXT: add 28, 0, 28 +; CHECK-NEXT: slwi 28, 28, 4 +; 
CHECK-NEXT: evlddx 28, 12, 28 +; CHECK-NEXT: addi 29, 29, 6 +; CHECK-NEXT: mullw 30, 30, 29 +; CHECK-NEXT: add 0, 0, 30 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 12, 12, 0 +; CHECK-NEXT: efdadd 12, 28, 12 +; CHECK-NEXT: li 30, 352 +; CHECK-NEXT: evstddx 12, 31, 30 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 452(31) +; CHECK-NEXT: lwz 30, 876(31) +; CHECK-NEXT: lwz 29, 764(31) +; CHECK-NEXT: mulli 29, 29, 11 +; CHECK-NEXT: addi 28, 29, 5 +; CHECK-NEXT: mullw 28, 30, 28 +; CHECK-NEXT: add 28, 0, 28 +; CHECK-NEXT: slwi 28, 28, 4 +; CHECK-NEXT: add 28, 12, 28 +; CHECK-NEXT: evlddx 28, 28, 3 +; CHECK-NEXT: addi 29, 29, 6 +; CHECK-NEXT: mullw 30, 30, 29 +; CHECK-NEXT: add 0, 0, 30 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 12, 12, 0 +; CHECK-NEXT: evlddx 12, 12, 3 +; CHECK-NEXT: efdadd 12, 28, 12 +; CHECK-NEXT: addi 30, 31, 352 +; CHECK-NEXT: evstddx 12, 30, 3 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 452(31) +; CHECK-NEXT: lwz 29, 876(31) +; CHECK-NEXT: lwz 28, 764(31) +; CHECK-NEXT: mulli 28, 28, 11 +; CHECK-NEXT: addi 27, 28, 5 +; CHECK-NEXT: mullw 27, 29, 27 +; CHECK-NEXT: add 27, 0, 27 +; CHECK-NEXT: slwi 27, 27, 4 +; CHECK-NEXT: evlddx 27, 12, 27 +; CHECK-NEXT: addi 28, 28, 6 +; CHECK-NEXT: mullw 29, 29, 28 +; CHECK-NEXT: add 0, 0, 29 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: evlddx 12, 12, 0 +; CHECK-NEXT: efdsub 12, 27, 12 +; CHECK-NEXT: li 29, 336 +; CHECK-NEXT: evstddx 12, 31, 29 +; CHECK-NEXT: lwz 12, 868(31) +; CHECK-NEXT: lwz 0, 452(31) +; CHECK-NEXT: lwz 29, 876(31) +; CHECK-NEXT: lwz 28, 764(31) +; CHECK-NEXT: mulli 28, 28, 11 +; CHECK-NEXT: addi 27, 28, 5 +; CHECK-NEXT: mullw 27, 29, 27 +; CHECK-NEXT: add 27, 0, 27 +; CHECK-NEXT: slwi 27, 27, 4 +; CHECK-NEXT: add 27, 12, 27 +; CHECK-NEXT: evlddx 27, 27, 3 +; CHECK-NEXT: addi 28, 28, 6 +; CHECK-NEXT: mullw 29, 29, 28 +; CHECK-NEXT: add 0, 0, 29 +; CHECK-NEXT: slwi 0, 0, 4 +; CHECK-NEXT: add 12, 12, 0 +; CHECK-NEXT: evlddx 12, 12, 3 +; CHECK-NEXT: efdsub 0, 27, 12 +; CHECK-NEXT: addi 12, 31, 336 +; CHECK-NEXT: evstddx 0, 12, 3 +; CHECK-NEXT: li 29, 432 +; CHECK-NEXT: evlddx 0, 31, 29 +; CHECK-NEXT: li 29, 416 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 400 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 384 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 368 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: li 29, 352 +; CHECK-NEXT: evlddx 29, 31, 29 +; CHECK-NEXT: efdadd 0, 0, 29 +; CHECK-NEXT: lwz 29, 864(31) +; CHECK-NEXT: lwz 28, 452(31) +; CHECK-NEXT: lwz 27, 876(31) +; CHECK-NEXT: lwz 26, 764(31) +; CHECK-NEXT: mullw 27, 27, 26 +; CHECK-NEXT: add 28, 28, 27 +; CHECK-NEXT: slwi 28, 28, 4 +; CHECK-NEXT: evstddx 0, 29, 28 +; CHECK-NEXT: addi 29, 31, 432 +; CHECK-NEXT: evlddx 0, 29, 3 +; CHECK-NEXT: evlddx 28, 5, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 7, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 9, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 11, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: evlddx 28, 30, 3 +; CHECK-NEXT: efdadd 0, 0, 28 +; CHECK-NEXT: lwz 28, 864(31) +; CHECK-NEXT: lwz 27, 452(31) +; CHECK-NEXT: lwz 26, 876(31) +; CHECK-NEXT: lwz 25, 764(31) +; CHECK-NEXT: mullw 26, 26, 25 +; CHECK-NEXT: add 27, 27, 26 +; CHECK-NEXT: slwi 27, 27, 4 +; CHECK-NEXT: add 28, 28, 27 +; CHECK-NEXT: evstddx 0, 28, 3 +; CHECK-NEXT: li 28, 432 +; CHECK-NEXT: evlddx 28, 31, 28 +; CHECK-NEXT: li 27, 416 +; 
CHECK-NEXT: evlddx 27, 31, 27
+; CHECK-NEXT: li 0, .LCPI1_5@l
+; CHECK-NEXT: lis 26, .LCPI1_5@ha
+; CHECK-NEXT: evlddx 0, 26, 0
+; CHECK-NEXT: efdmul 27, 27, 0
+; CHECK-NEXT: efdadd 27, 27, 28
+; CHECK-NEXT: li 28, 400
+; CHECK-NEXT: evlddx 26, 31, 28
+; CHECK-NEXT: li 28, .LCPI1_6@l
+; CHECK-NEXT: lis 25, .LCPI1_6@ha
+; CHECK-NEXT: evlddx 28, 25, 28
+; CHECK-NEXT: efdmul 26, 26, 28
+; CHECK-NEXT: efdadd 26, 26, 27
+; CHECK-NEXT: li 27, 384
+; CHECK-NEXT: evlddx 25, 31, 27
+; CHECK-NEXT: li 27, .LCPI1_7@l
+; CHECK-NEXT: lis 24, .LCPI1_7@ha
+; CHECK-NEXT: evlddx 27, 24, 27
+; CHECK-NEXT: efdmul 25, 25, 27
+; CHECK-NEXT: efdadd 26, 25, 26
+; CHECK-NEXT: li 25, 368
+; CHECK-NEXT: evlddx 24, 31, 25
+; CHECK-NEXT: li 25, .LCPI1_8@l
+; CHECK-NEXT: lis 23, .LCPI1_8@ha
+; CHECK-NEXT: evlddx 25, 23, 25
+; CHECK-NEXT: efdmul 24, 24, 25
+; CHECK-NEXT: efdadd 24, 24, 26
+; CHECK-NEXT: li 26, 352
+; CHECK-NEXT: evlddx 23, 31, 26
+; CHECK-NEXT: li 26, .LCPI1_9@l
+; CHECK-NEXT: lis 22, .LCPI1_9@ha
+; CHECK-NEXT: evlddx 26, 22, 26
+; CHECK-NEXT: efdmul 23, 23, 26
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evstdd 24, 224(31)
+; CHECK-NEXT: evlddx 24, 29, 3
+; CHECK-NEXT: evlddx 23, 5, 3
+; CHECK-NEXT: efdmul 23, 23, 0
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 7, 3
+; CHECK-NEXT: efdmul 23, 23, 28
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 9, 3
+; CHECK-NEXT: efdmul 23, 23, 27
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 11, 3
+; CHECK-NEXT: efdmul 23, 23, 25
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 30, 3
+; CHECK-NEXT: efdmul 23, 23, 26
+; CHECK-NEXT: efdadd 23, 23, 24
+; CHECK-NEXT: addi 24, 31, 224
+; CHECK-NEXT: evstddx 23, 24, 3
+; CHECK-NEXT: li 23, 832
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: li 22, 272
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 816
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: li 20, 288
+; CHECK-NEXT: evlddx 20, 31, 20
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdmul 23, 23, 22
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: li 22, 800
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 304
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdadd 23, 22, 23
+; CHECK-NEXT: li 22, 784
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 320
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdadd 23, 22, 23
+; CHECK-NEXT: li 22, 768
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 336
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdadd 23, 22, 23
+; CHECK-NEXT: addi 22, 31, 208
+; CHECK-NEXT: evstddx 23, 22, 3
+; CHECK-NEXT: li 23, 832
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: evlddx 21, 4, 3
+; CHECK-NEXT: li 20, 816
+; CHECK-NEXT: evlddx 20, 31, 20
+; CHECK-NEXT: evlddx 19, 6, 3
+; CHECK-NEXT: efdmul 20, 20, 19
+; CHECK-NEXT: efdmul 23, 23, 21
+; CHECK-NEXT: efdadd 23, 23, 20
+; CHECK-NEXT: li 21, 800
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 8, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: li 21, 784
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 10, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: li 21, 768
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 12, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: efdneg 23, 23
+; CHECK-NEXT: evstdd 23, 208(31)
+; CHECK-NEXT: evldd 23, 224(31)
+; CHECK-NEXT: evldd 21, 208(31)
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: li 21, 256
+; CHECK-NEXT: evstddx 23, 31, 21
+; CHECK-NEXT: evlddx 23, 24, 3
+; CHECK-NEXT: evlddx 21, 22, 3
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: addi 21, 31, 256
+; CHECK-NEXT: evstddx 23, 21, 3
+; CHECK-NEXT: evldd 23, 224(31)
+; CHECK-NEXT: evldd 20, 208(31)
+; CHECK-NEXT: efdsub 23, 23, 20
+; CHECK-NEXT: evstdd 23, 240(31)
+; CHECK-NEXT: evlddx 24, 24, 3
+; CHECK-NEXT: evlddx 23, 22, 3
+; CHECK-NEXT: efdsub 23, 24, 23
+; CHECK-NEXT: addi 24, 31, 240
+; CHECK-NEXT: evstddx 23, 24, 3
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: slwi 20, 22, 4
+; CHECK-NEXT: lwz 19, 876(31)
+; CHECK-NEXT: add 23, 20, 23
+; CHECK-NEXT: li 20, -16
+; CHECK-NEXT: evlddx 18, 23, 20
+; CHECK-NEXT: li 17, 256
+; CHECK-NEXT: evlddx 17, 31, 17
+; CHECK-NEXT: lwz 16, 856(31)
+; CHECK-NEXT: efdcfsi 16, 16
+; CHECK-NEXT: li 15, -8
+; CHECK-NEXT: evlddx 23, 23, 15
+; CHECK-NEXT: efdmul 23, 16, 23
+; CHECK-NEXT: evlddx 16, 21, 3
+; CHECK-NEXT: efdmul 23, 23, 16
+; CHECK-NEXT: efdmul 18, 18, 17
+; CHECK-NEXT: efdsub 23, 18, 23
+; CHECK-NEXT: lwz 18, 864(31)
+; CHECK-NEXT: lwz 17, 764(31)
+; CHECK-NEXT: lwz 16, 872(31)
+; CHECK-NEXT: add 17, 17, 16
+; CHECK-NEXT: mullw 19, 19, 17
+; CHECK-NEXT: add 22, 22, 19
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: evstddx 23, 18, 22
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: slwi 19, 22, 4
+; CHECK-NEXT: lwz 18, 876(31)
+; CHECK-NEXT: add 23, 19, 23
+; CHECK-NEXT: evlddx 20, 23, 20
+; CHECK-NEXT: evlddx 21, 21, 3
+; CHECK-NEXT: lwz 19, 856(31)
+; CHECK-NEXT: efdcfsi 19, 19
+; CHECK-NEXT: evlddx 23, 23, 15
+; CHECK-NEXT: efdmul 23, 19, 23
+; CHECK-NEXT: li 19, 256
+; CHECK-NEXT: evlddx 19, 31, 19
+; CHECK-NEXT: efdmul 23, 23, 19
+; CHECK-NEXT: efdmul 21, 20, 21
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: lwz 21, 864(31)
+; CHECK-NEXT: lwz 20, 764(31)
+; CHECK-NEXT: lwz 19, 872(31)
+; CHECK-NEXT: add 20, 20, 19
+; CHECK-NEXT: mullw 20, 18, 20
+; CHECK-NEXT: add 22, 22, 20
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: add 22, 21, 22
+; CHECK-NEXT: evstddx 23, 22, 3
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 21, 876(31)
+; CHECK-NEXT: mulli 20, 21, 9
+; CHECK-NEXT: add 20, 20, 22
+; CHECK-NEXT: slwi 20, 20, 4
+; CHECK-NEXT: add 23, 20, 23
+; CHECK-NEXT: li 20, -160
+; CHECK-NEXT: evlddx 19, 23, 20
+; CHECK-NEXT: evldd 18, 240(31)
+; CHECK-NEXT: lwz 17, 856(31)
+; CHECK-NEXT: efdcfsi 17, 17
+; CHECK-NEXT: li 16, -152
+; CHECK-NEXT: evlddx 23, 23, 16
+; CHECK-NEXT: efdmul 23, 17, 23
+; CHECK-NEXT: evlddx 17, 24, 3
+; CHECK-NEXT: efdmul 23, 23, 17
+; CHECK-NEXT: efdmul 19, 19, 18
+; CHECK-NEXT: efdsub 23, 19, 23
+; CHECK-NEXT: lwz 19, 864(31)
+; CHECK-NEXT: lwz 18, 764(31)
+; CHECK-NEXT: lwz 17, 872(31)
+; CHECK-NEXT: mulli 17, 17, 10
+; CHECK-NEXT: add 18, 18, 17
+; CHECK-NEXT: mullw 21, 21, 18
+; CHECK-NEXT: add 22, 22, 21
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: evstddx 23, 19, 22
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 21, 876(31)
+; CHECK-NEXT: mulli 19, 21, 9
+; CHECK-NEXT: add 19, 19, 22
+; CHECK-NEXT: slwi 19, 19, 4
+; CHECK-NEXT: add 23, 19, 23
+; CHECK-NEXT: evlddx 20, 23, 20
+; CHECK-NEXT: evlddx 24, 24, 3
+; CHECK-NEXT: lwz 19, 856(31)
+; CHECK-NEXT: efdcfsi 19, 19
+; CHECK-NEXT: evlddx 23, 23, 16
+; CHECK-NEXT: efdmul 23, 19, 23
+; CHECK-NEXT: evldd 19, 240(31)
+; CHECK-NEXT: efdmul 23, 23, 19
+; CHECK-NEXT: efdmul 24, 20, 24
+; CHECK-NEXT: efdadd 24, 24, 23
+; CHECK-NEXT: lwz 23, 864(31)
+; CHECK-NEXT: lwz 20, 764(31)
+; CHECK-NEXT: lwz 19, 872(31)
+; CHECK-NEXT: mulli 19, 19, 10
+; CHECK-NEXT: add 20, 20, 19
+; CHECK-NEXT: mullw 21, 21, 20
+; CHECK-NEXT: add 22, 22, 21
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: add 23, 23, 22
+; CHECK-NEXT: evstddx 24, 23, 3
+; CHECK-NEXT: li 24, 432
+; CHECK-NEXT: evlddx 24, 31, 24
+; CHECK-NEXT: li 23, 416
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 28
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 400
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 25
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 384
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 26
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 368
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 27
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 352
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 0
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evstdd 24, 160(31)
+; CHECK-NEXT: evlddx 24, 29, 3
+; CHECK-NEXT: evlddx 23, 5, 3
+; CHECK-NEXT: efdmul 23, 23, 28
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 7, 3
+; CHECK-NEXT: efdmul 23, 23, 25
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 9, 3
+; CHECK-NEXT: efdmul 23, 23, 26
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 11, 3
+; CHECK-NEXT: efdmul 23, 23, 27
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 30, 3
+; CHECK-NEXT: efdmul 23, 23, 0
+; CHECK-NEXT: efdadd 23, 23, 24
+; CHECK-NEXT: addi 24, 31, 160
+; CHECK-NEXT: evstddx 23, 24, 3
+; CHECK-NEXT: li 23, 816
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: li 22, 272
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 784
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: li 20, 288
+; CHECK-NEXT: evlddx 20, 31, 20
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdmul 23, 23, 22
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: li 22, 768
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 304
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdsub 23, 23, 22
+; CHECK-NEXT: li 22, 800
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 320
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdsub 23, 23, 22
+; CHECK-NEXT: li 22, 832
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 336
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdsub 23, 23, 22
+; CHECK-NEXT: addi 22, 31, 144
+; CHECK-NEXT: evstddx 23, 22, 3
+; CHECK-NEXT: li 23, 816
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: evlddx 21, 4, 3
+; CHECK-NEXT: li 20, 784
+; CHECK-NEXT: evlddx 20, 31, 20
+; CHECK-NEXT: evlddx 19, 6, 3
+; CHECK-NEXT: efdmul 20, 20, 19
+; CHECK-NEXT: efdmul 23, 23, 21
+; CHECK-NEXT: efdadd 23, 23, 20
+; CHECK-NEXT: li 21, 768
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 8, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdsub 23, 23, 21
+; CHECK-NEXT: li 21, 800
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 10, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdsub 23, 23, 21
+; CHECK-NEXT: li 21, 832
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 12, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdsub 23, 23, 21
+; CHECK-NEXT: efdneg 23, 23
+; CHECK-NEXT: evstdd 23, 144(31)
+; CHECK-NEXT: evldd 23, 160(31)
+; CHECK-NEXT: evldd 21, 144(31)
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: evstdd 23, 192(31)
+; CHECK-NEXT: evlddx 23, 24, 3
+; CHECK-NEXT: evlddx 21, 22, 3
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: addi 21, 31, 192
+; CHECK-NEXT: evstddx 23, 21, 3
+; CHECK-NEXT: evldd 23, 160(31)
+; CHECK-NEXT: evldd 20, 144(31)
+; CHECK-NEXT: efdsub 23, 23, 20
+; CHECK-NEXT: evstdd 23, 176(31)
+; CHECK-NEXT: evlddx 24, 24, 3
+; CHECK-NEXT: evlddx 23, 22, 3
+; CHECK-NEXT: efdsub 23, 24, 23
+; CHECK-NEXT: addi 24, 31, 176
+; CHECK-NEXT: evstddx 23, 24, 3
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 20, 876(31)
+; CHECK-NEXT: add 19, 20, 22
+; CHECK-NEXT: slwi 19, 19, 4
+; CHECK-NEXT: add 23, 19, 23
+; CHECK-NEXT: li 19, -32
+; CHECK-NEXT: evlddx 18, 23, 19
+; CHECK-NEXT: evldd 17, 192(31)
+; CHECK-NEXT: lwz 16, 856(31)
+; CHECK-NEXT: efdcfsi 16, 16
+; CHECK-NEXT: li 15, -24
+; CHECK-NEXT: evlddx 23, 23, 15
+; CHECK-NEXT: efdmul 23, 16, 23
+; CHECK-NEXT: evlddx 16, 21, 3
+; CHECK-NEXT: efdmul 23, 23, 16
+; CHECK-NEXT: efdmul 18, 18, 17
+; CHECK-NEXT: efdsub 23, 18, 23
+; CHECK-NEXT: lwz 18, 864(31)
+; CHECK-NEXT: lwz 17, 764(31)
+; CHECK-NEXT: lwz 16, 872(31)
+; CHECK-NEXT: slwi 16, 16, 1
+; CHECK-NEXT: add 17, 17, 16
+; CHECK-NEXT: mullw 20, 20, 17
+; CHECK-NEXT: add 22, 22, 20
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: evstddx 23, 18, 22
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 20, 876(31)
+; CHECK-NEXT: add 18, 20, 22
+; CHECK-NEXT: slwi 18, 18, 4
+; CHECK-NEXT: add 23, 18, 23
+; CHECK-NEXT: evlddx 19, 23, 19
+; CHECK-NEXT: evlddx 21, 21, 3
+; CHECK-NEXT: lwz 18, 856(31)
+; CHECK-NEXT: efdcfsi 18, 18
+; CHECK-NEXT: evlddx 23, 23, 15
+; CHECK-NEXT: efdmul 23, 18, 23
+; CHECK-NEXT: evldd 18, 192(31)
+; CHECK-NEXT: efdmul 23, 23, 18
+; CHECK-NEXT: efdmul 21, 19, 21
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: lwz 21, 864(31)
+; CHECK-NEXT: lwz 19, 764(31)
+; CHECK-NEXT: lwz 18, 872(31)
+; CHECK-NEXT: slwi 18, 18, 1
+; CHECK-NEXT: add 19, 19, 18
+; CHECK-NEXT: mullw 20, 20, 19
+; CHECK-NEXT: add 22, 22, 20
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: add 22, 21, 22
+; CHECK-NEXT: evstddx 23, 22, 3
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 21, 876(31)
+; CHECK-NEXT: slwi 20, 21, 3
+; CHECK-NEXT: add 20, 20, 22
+; CHECK-NEXT: slwi 20, 20, 4
+; CHECK-NEXT: add 23, 20, 23
+; CHECK-NEXT: li 20, -144
+; CHECK-NEXT: evlddx 19, 23, 20
+; CHECK-NEXT: evldd 18, 176(31)
+; CHECK-NEXT: lwz 17, 856(31)
+; CHECK-NEXT: efdcfsi 17, 17
+; CHECK-NEXT: li 16, -136
+; CHECK-NEXT: evlddx 23, 23, 16
+; CHECK-NEXT: efdmul 23, 17, 23
+; CHECK-NEXT: evlddx 17, 24, 3
+; CHECK-NEXT: efdmul 23, 23, 17
+; CHECK-NEXT: efdmul 19, 19, 18
+; CHECK-NEXT: efdsub 23, 19, 23
+; CHECK-NEXT: lwz 19, 864(31)
+; CHECK-NEXT: lwz 18, 764(31)
+; CHECK-NEXT: lwz 17, 872(31)
+; CHECK-NEXT: mulli 17, 17, 9
+; CHECK-NEXT: add 18, 18, 17
+; CHECK-NEXT: mullw 21, 21, 18
+; CHECK-NEXT: add 22, 22, 21
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: evstddx 23, 19, 22
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 21, 876(31)
+; CHECK-NEXT: slwi 19, 21, 3
+; CHECK-NEXT: add 19, 19, 22
+; CHECK-NEXT: slwi 19, 19, 4
+; CHECK-NEXT: add 23, 19, 23
+; CHECK-NEXT: evlddx 20, 23, 20
+; CHECK-NEXT: evlddx 24, 24, 3
+; CHECK-NEXT: lwz 19, 856(31)
+; CHECK-NEXT: efdcfsi 19, 19
+; CHECK-NEXT: evlddx 23, 23, 16
+; CHECK-NEXT: efdmul 23, 19, 23
+; CHECK-NEXT: evldd 19, 176(31)
+; CHECK-NEXT: efdmul 23, 23, 19
+; CHECK-NEXT: efdmul 24, 20, 24
+; CHECK-NEXT: efdadd 24, 24, 23
+; CHECK-NEXT: lwz 23, 864(31)
+; CHECK-NEXT: lwz 20, 764(31)
+; CHECK-NEXT: lwz 19, 872(31)
+; CHECK-NEXT: mulli 19, 19, 9
+; CHECK-NEXT: add 20, 20, 19
+; CHECK-NEXT: mullw 21, 21, 20
+; CHECK-NEXT: add 22, 22, 21
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: add 23, 23, 22
+; CHECK-NEXT: evstddx 24, 23, 3
+; CHECK-NEXT: li 24, 432
+; CHECK-NEXT: evlddx 24, 31, 24
+; CHECK-NEXT: li 23, 416
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 27
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 400
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 26
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 384
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 28
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 368
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 0
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 352
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 25
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evstdd 24, 96(31)
+; CHECK-NEXT: evlddx 24, 29, 3
+; CHECK-NEXT: evlddx 23, 5, 3
+; CHECK-NEXT: efdmul 23, 23, 27
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 7, 3
+; CHECK-NEXT: efdmul 23, 23, 26
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 9, 3
+; CHECK-NEXT: efdmul 23, 23, 28
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 11, 3
+; CHECK-NEXT: efdmul 23, 23, 0
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evlddx 23, 30, 3
+; CHECK-NEXT: efdmul 23, 23, 25
+; CHECK-NEXT: efdadd 23, 23, 24
+; CHECK-NEXT: addi 24, 31, 96
+; CHECK-NEXT: evstddx 23, 24, 3
+; CHECK-NEXT: li 23, 800
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: li 22, 272
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 768
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: li 20, 288
+; CHECK-NEXT: evlddx 20, 31, 20
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdmul 23, 23, 22
+; CHECK-NEXT: efdsub 23, 23, 21
+; CHECK-NEXT: li 22, 816
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 304
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdsub 23, 23, 22
+; CHECK-NEXT: li 22, 832
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 320
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdadd 23, 22, 23
+; CHECK-NEXT: li 22, 784
+; CHECK-NEXT: evlddx 22, 31, 22
+; CHECK-NEXT: li 21, 336
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: efdmul 22, 22, 21
+; CHECK-NEXT: efdadd 23, 22, 23
+; CHECK-NEXT: addi 22, 31, 80
+; CHECK-NEXT: evstddx 23, 22, 3
+; CHECK-NEXT: li 23, 800
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: evlddx 21, 4, 3
+; CHECK-NEXT: li 20, 768
+; CHECK-NEXT: evlddx 20, 31, 20
+; CHECK-NEXT: evlddx 19, 6, 3
+; CHECK-NEXT: efdmul 20, 20, 19
+; CHECK-NEXT: efdmul 23, 23, 21
+; CHECK-NEXT: efdsub 23, 23, 20
+; CHECK-NEXT: li 21, 816
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 8, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdsub 23, 23, 21
+; CHECK-NEXT: li 21, 832
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 10, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: li 21, 784
+; CHECK-NEXT: evlddx 21, 31, 21
+; CHECK-NEXT: evlddx 20, 12, 3
+; CHECK-NEXT: efdmul 21, 21, 20
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: efdneg 23, 23
+; CHECK-NEXT: evstdd 23, 80(31)
+; CHECK-NEXT: evldd 23, 96(31)
+; CHECK-NEXT: evldd 21, 80(31)
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: evstdd 23, 128(31)
+; CHECK-NEXT: evlddx 23, 24, 3
+; CHECK-NEXT: evlddx 21, 22, 3
+; CHECK-NEXT: efdadd 23, 23, 21
+; CHECK-NEXT: addi 21, 31, 128
+; CHECK-NEXT: evstddx 23, 21, 3
+; CHECK-NEXT: evldd 23, 96(31)
+; CHECK-NEXT: evldd 20, 80(31)
+; CHECK-NEXT: efdsub 23, 23, 20
+; CHECK-NEXT: evstdd 23, 112(31)
+; CHECK-NEXT: evlddx 24, 24, 3
+; CHECK-NEXT: evlddx 23, 22, 3
+; CHECK-NEXT: efdsub 23, 24, 23
+; CHECK-NEXT: addi 24, 31, 112
+; CHECK-NEXT: evstddx 23, 24, 3
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 20, 876(31)
+; CHECK-NEXT: slwi 19, 20, 1
+; CHECK-NEXT: add 19, 19, 22
+; CHECK-NEXT: slwi 19, 19, 4
+; CHECK-NEXT: add 23, 19, 23
+; CHECK-NEXT: li 19, -48
+; CHECK-NEXT: evlddx 18, 23, 19
+; CHECK-NEXT: evldd 17, 128(31)
+; CHECK-NEXT: lwz 16, 856(31)
+; CHECK-NEXT: efdcfsi 16, 16
+; CHECK-NEXT: li 15, -40
+; CHECK-NEXT: evlddx 23, 23, 15
+; CHECK-NEXT: efdmul 23, 16, 23
+; CHECK-NEXT: evlddx 16, 21, 3
+; CHECK-NEXT: efdmul 23, 23, 16
+; CHECK-NEXT: efdmul 18, 18, 17
+; CHECK-NEXT: efdsub 23, 18, 23
+; CHECK-NEXT: lwz 18, 864(31)
+; CHECK-NEXT: lwz 17, 764(31)
+; CHECK-NEXT: lwz 16, 872(31)
+; CHECK-NEXT: mulli 16, 16, 3
+; CHECK-NEXT: add 17, 17, 16
+; CHECK-NEXT: mullw 20, 20, 17
+; CHECK-NEXT: add 22, 22, 20
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: evstddx 23, 18, 22
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 20, 876(31)
+; CHECK-NEXT: slwi 18, 20, 1
+; CHECK-NEXT: add 18, 18, 22
+; CHECK-NEXT: slwi 18, 18, 4
+; CHECK-NEXT: add 23, 18, 23
+; CHECK-NEXT: evlddx 19, 23, 19
+; CHECK-NEXT: evlddx 21, 21, 3
+; CHECK-NEXT: lwz 18, 856(31)
+; CHECK-NEXT: efdcfsi 18, 18
+; CHECK-NEXT: evlddx 23, 23, 15
+; CHECK-NEXT: efdmul 23, 18, 23
+; CHECK-NEXT: evldd 18, 128(31)
+; CHECK-NEXT: efdmul 23, 23, 18
+; CHECK-NEXT: efdmul 21, 19, 21
+; CHECK-NEXT: efdadd 23, 21, 23
+; CHECK-NEXT: lwz 21, 864(31)
+; CHECK-NEXT: lwz 19, 764(31)
+; CHECK-NEXT: lwz 18, 872(31)
+; CHECK-NEXT: mulli 18, 18, 3
+; CHECK-NEXT: add 19, 19, 18
+; CHECK-NEXT: mullw 20, 20, 19
+; CHECK-NEXT: add 22, 22, 20
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: add 22, 21, 22
+; CHECK-NEXT: evstddx 23, 22, 3
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 21, 876(31)
+; CHECK-NEXT: mulli 20, 21, 7
+; CHECK-NEXT: add 20, 20, 22
+; CHECK-NEXT: slwi 20, 20, 4
+; CHECK-NEXT: add 23, 20, 23
+; CHECK-NEXT: li 20, -128
+; CHECK-NEXT: evlddx 19, 23, 20
+; CHECK-NEXT: evldd 18, 112(31)
+; CHECK-NEXT: lwz 17, 856(31)
+; CHECK-NEXT: efdcfsi 17, 17
+; CHECK-NEXT: li 16, -120
+; CHECK-NEXT: evlddx 23, 23, 16
+; CHECK-NEXT: efdmul 23, 17, 23
+; CHECK-NEXT: evlddx 17, 24, 3
+; CHECK-NEXT: efdmul 23, 23, 17
+; CHECK-NEXT: efdmul 19, 19, 18
+; CHECK-NEXT: efdsub 23, 19, 23
+; CHECK-NEXT: lwz 19, 864(31)
+; CHECK-NEXT: lwz 18, 764(31)
+; CHECK-NEXT: lwz 17, 872(31)
+; CHECK-NEXT: slwi 17, 17, 3
+; CHECK-NEXT: add 18, 18, 17
+; CHECK-NEXT: mullw 21, 21, 18
+; CHECK-NEXT: add 22, 22, 21
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: evstddx 23, 19, 22
+; CHECK-NEXT: lwz 23, 860(31)
+; CHECK-NEXT: lwz 22, 452(31)
+; CHECK-NEXT: lwz 21, 876(31)
+; CHECK-NEXT: mulli 19, 21, 7
+; CHECK-NEXT: add 19, 19, 22
+; CHECK-NEXT: slwi 19, 19, 4
+; CHECK-NEXT: add 23, 19, 23
+; CHECK-NEXT: evlddx 20, 23, 20
+; CHECK-NEXT: evlddx 24, 24, 3
+; CHECK-NEXT: lwz 19, 856(31)
+; CHECK-NEXT: efdcfsi 19, 19
+; CHECK-NEXT: evlddx 23, 23, 16
+; CHECK-NEXT: efdmul 23, 19, 23
+; CHECK-NEXT: evldd 19, 112(31)
+; CHECK-NEXT: efdmul 23, 23, 19
+; CHECK-NEXT: efdmul 24, 20, 24
+; CHECK-NEXT: efdadd 24, 24, 23
+; CHECK-NEXT: lwz 23, 864(31)
+; CHECK-NEXT: lwz 20, 764(31)
+; CHECK-NEXT: lwz 19, 872(31)
+; CHECK-NEXT: slwi 19, 19, 3
+; CHECK-NEXT: add 20, 20, 19
+; CHECK-NEXT: mullw 21, 21, 20
+; CHECK-NEXT: add 22, 22, 21
+; CHECK-NEXT: slwi 22, 22, 4
+; CHECK-NEXT: add 23, 23, 22
+; CHECK-NEXT: evstddx 24, 23, 3
+; CHECK-NEXT: li 24, 432
+; CHECK-NEXT: evlddx 24, 31, 24
+; CHECK-NEXT: li 23, 416
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 25
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 400
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 27
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 384
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 0
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 368
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 26
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: li 23, 352
+; CHECK-NEXT: evlddx 23, 31, 23
+; CHECK-NEXT: efdmul 23, 23, 28
+; CHECK-NEXT: efdadd 24, 23, 24
+; CHECK-NEXT: evstdd 24, 32(31)
+; CHECK-NEXT: evlddx 29, 29, 3
+; CHECK-NEXT: evlddx 5, 5, 3
+; CHECK-NEXT: efdmul 5, 5, 25
+; CHECK-NEXT: efdadd 5, 5, 29
+; CHECK-NEXT: evlddx 7, 7, 3
+; CHECK-NEXT: efdmul 7, 7, 27
+; CHECK-NEXT: efdadd 5, 7, 5
+; CHECK-NEXT: evlddx 7, 9, 3
+; CHECK-NEXT: efdmul 7, 7, 0
+; CHECK-NEXT: efdadd 5, 7, 5
+; CHECK-NEXT: evlddx 7, 11, 3
+; CHECK-NEXT: efdmul 7, 7, 26
+; CHECK-NEXT: efdadd 5, 7, 5
+; CHECK-NEXT: evlddx 7, 30, 3
+; CHECK-NEXT: efdmul 7, 7, 28
+; CHECK-NEXT: efdadd 7, 7, 5
+; CHECK-NEXT: addi 5, 31, 32
+; CHECK-NEXT: evstddx 7, 5, 3
+; CHECK-NEXT: li 7, 784
+; CHECK-NEXT: evlddx 7, 31, 7
+; CHECK-NEXT: li 9, 272
+; CHECK-NEXT: evlddx 9, 31, 9
+; CHECK-NEXT: li 11, 800
+; CHECK-NEXT: evlddx 11, 31, 11
+; CHECK-NEXT: li 30, 288
+; CHECK-NEXT: evlddx 0, 31, 30
+; CHECK-NEXT: efdmul 11, 11, 0
+; CHECK-NEXT: efdmul 7, 7, 9
+; CHECK-NEXT: efdsub 7, 7, 11
+; CHECK-NEXT: li 9, 832
+; CHECK-NEXT: evlddx 9, 31, 9
+; CHECK-NEXT: li 11, 304
+; CHECK-NEXT: evlddx 11, 31, 11
+; CHECK-NEXT: efdmul 9, 9, 11
+; CHECK-NEXT: efdadd 7, 9, 7
+; CHECK-NEXT: li 9, 768
+; CHECK-NEXT: evlddx 9, 31, 9
+; CHECK-NEXT: li 11, 320
+; CHECK-NEXT: evlddx 11, 31, 11
+; CHECK-NEXT: efdmul 9, 9, 11
+; CHECK-NEXT: efdadd 7, 9, 7
+; CHECK-NEXT: li 9, 816
+; CHECK-NEXT: evlddx 9, 31, 9
+; CHECK-NEXT: li 11, 336
+; CHECK-NEXT: evlddx 11, 31, 11
+; CHECK-NEXT: efdmul 9, 9, 11
+; CHECK-NEXT: efdsub 7, 7, 9
+; CHECK-NEXT: addi 9, 31, 16
+; CHECK-NEXT: evstddx 7, 9, 3
+; CHECK-NEXT: li 7, 784
+; CHECK-NEXT: evlddx 7, 31, 7
+; CHECK-NEXT: evlddx 4, 4, 3
+; CHECK-NEXT: li 11, 800
+; CHECK-NEXT: evlddx 11, 31, 11
+; CHECK-NEXT: evlddx 6, 6, 3
+; CHECK-NEXT: efdmul 6, 11, 6
+; CHECK-NEXT: efdmul 4, 7, 4
+; CHECK-NEXT: efdsub 4, 4, 6
+; CHECK-NEXT: li 6, 832
+; CHECK-NEXT: evlddx 6, 31, 6
+; CHECK-NEXT: evlddx 7, 8, 3
+; CHECK-NEXT: efdmul 6, 6, 7
+; CHECK-NEXT: efdadd 4, 6, 4
+; CHECK-NEXT: li 6, 768
+; CHECK-NEXT: evlddx 6, 31, 6
+; CHECK-NEXT: evlddx 7, 10, 3
+; CHECK-NEXT: efdmul 6, 6, 7
+; CHECK-NEXT: efdadd 4, 6, 4
+; CHECK-NEXT: li 6, 816
+; CHECK-NEXT: evlddx 6, 31, 6
+; CHECK-NEXT: evlddx 7, 12, 3
+; CHECK-NEXT: efdmul 6, 6, 7
+; CHECK-NEXT: efdsub 4, 4, 6
+; CHECK-NEXT: efdneg 4, 4
+; CHECK-NEXT: evstdd 4, 16(31)
+; CHECK-NEXT: evldd 4, 32(31)
+; CHECK-NEXT: evldd 6, 16(31)
+; CHECK-NEXT: efdadd 4, 4, 6
+; CHECK-NEXT: evstdd 4, 64(31)
+; CHECK-NEXT: evlddx 4, 5, 3
+; CHECK-NEXT: evlddx 6, 9, 3
+; CHECK-NEXT: efdadd 4, 4, 6
+; CHECK-NEXT: addi 6, 31, 64
+; CHECK-NEXT: evstddx 4, 6, 3
+; CHECK-NEXT: evldd 4, 32(31)
+; CHECK-NEXT: evldd 7, 16(31)
+; CHECK-NEXT: efdsub 4, 4, 7
+; CHECK-NEXT: evstdd 4, 48(31)
+; CHECK-NEXT: evlddx 4, 5, 3
+; CHECK-NEXT: evlddx 5, 9, 3
+; CHECK-NEXT: efdsub 5, 4, 5
+; CHECK-NEXT: addi 4, 31, 48
+; CHECK-NEXT: evstddx 5, 4, 3
+; CHECK-NEXT: lwz 5, 860(31)
+; CHECK-NEXT: lwz 7, 452(31)
+; CHECK-NEXT: lwz 8, 876(31)
+; CHECK-NEXT: mulli 9, 8, 3
+; CHECK-NEXT: add 9, 9, 7
+; CHECK-NEXT: slwi 9, 9, 4
+; CHECK-NEXT: add 5, 9, 5
+; CHECK-NEXT: li 9, -64
+; CHECK-NEXT: evlddx 10, 5, 9
+; CHECK-NEXT: evldd 11, 64(31)
+; CHECK-NEXT: lwz 12, 856(31)
+; CHECK-NEXT: efdcfsi 12, 12
+; CHECK-NEXT: li 0, -56
+; CHECK-NEXT: evlddx 5, 5, 0
+; CHECK-NEXT: efdmul 5, 12, 5
+; CHECK-NEXT: evlddx 12, 6, 3
+; CHECK-NEXT: efdmul 5, 5, 12
+; CHECK-NEXT: efdmul 10, 10, 11
+; CHECK-NEXT: efdsub 5, 10, 5
+; CHECK-NEXT: lwz 10, 864(31)
+; CHECK-NEXT: lwz 11, 764(31)
+; CHECK-NEXT: lwz 12, 872(31)
+; CHECK-NEXT: slwi 12, 12, 2
+; CHECK-NEXT: add 11, 11, 12
+; CHECK-NEXT: mullw 8, 8, 11
+; CHECK-NEXT: add 7, 7, 8
+; CHECK-NEXT: slwi 7, 7, 4
+; CHECK-NEXT: evstddx 5, 10, 7
+; CHECK-NEXT: lwz 5, 860(31)
+; CHECK-NEXT: lwz 7, 452(31)
+; CHECK-NEXT: lwz 8, 876(31)
+; CHECK-NEXT: mulli 10, 8, 3
+; CHECK-NEXT: add 10, 10, 7
+; CHECK-NEXT: slwi 10, 10, 4
+; CHECK-NEXT: add 5, 10, 5
+; CHECK-NEXT: evlddx 9, 5, 9
+; CHECK-NEXT: evlddx 6, 6, 3
+; CHECK-NEXT: lwz 10, 856(31)
+; CHECK-NEXT: efdcfsi 10, 10
+; CHECK-NEXT: evlddx 5, 5, 0
+; CHECK-NEXT: efdmul 5, 10, 5
+; CHECK-NEXT: evldd 10, 64(31)
+; CHECK-NEXT: efdmul 5, 5, 10
+; CHECK-NEXT: efdmul 6, 9, 6
+; CHECK-NEXT: efdadd 5, 6, 5
+; CHECK-NEXT: lwz 6, 864(31)
+; CHECK-NEXT: lwz 9, 764(31)
+; CHECK-NEXT: lwz 10, 872(31)
+; CHECK-NEXT: slwi 10, 10, 2
+; CHECK-NEXT: add 9, 9, 10
+; CHECK-NEXT: mullw 8, 8, 9
+; CHECK-NEXT: add 7, 7, 8
+; CHECK-NEXT: slwi 7, 7, 4
+; CHECK-NEXT: add 6, 6, 7
+; CHECK-NEXT: evstddx 5, 6, 3
+; CHECK-NEXT: lwz 5, 860(31)
+; CHECK-NEXT: lwz 6, 452(31)
+; CHECK-NEXT: lwz 7, 876(31)
+; CHECK-NEXT: mulli 8, 7, 6
+; CHECK-NEXT: add 8, 8, 6
+; CHECK-NEXT: slwi 8, 8, 4
+; CHECK-NEXT: add 5, 8, 5
+; CHECK-NEXT: li 8, -112
+; CHECK-NEXT: evlddx 9, 5, 8
+; CHECK-NEXT: evldd 10, 48(31)
+; CHECK-NEXT: lwz 11, 856(31)
+; CHECK-NEXT: efdcfsi 11, 11
+; CHECK-NEXT: li 12, -104
+; CHECK-NEXT: evlddx 5, 5, 12
+; CHECK-NEXT: efdmul 5, 11, 5
+; CHECK-NEXT: evlddx 11, 4, 3
+; CHECK-NEXT: efdmul 5, 5, 11
+; CHECK-NEXT: efdmul 9, 9, 10
+; CHECK-NEXT: efdsub 5, 9, 5
+; CHECK-NEXT: lwz 9, 864(31)
+; CHECK-NEXT: lwz 10, 764(31)
+; CHECK-NEXT: lwz 11, 872(31)
+; CHECK-NEXT: mulli 11, 11, 7
+; CHECK-NEXT: add 10, 10, 11
+; CHECK-NEXT: mullw 7, 7, 10
+; CHECK-NEXT: add 6, 6, 7
+; CHECK-NEXT: slwi 6, 6, 4
+; CHECK-NEXT: evstddx 5, 9, 6
+; CHECK-NEXT: lwz 5, 860(31)
+; CHECK-NEXT: lwz 6, 452(31)
+; CHECK-NEXT: lwz 7, 876(31)
+; CHECK-NEXT: mulli 9, 7, 6
+; CHECK-NEXT: add 9, 9, 6
+; CHECK-NEXT: slwi 9, 9, 4
+; CHECK-NEXT: add 5, 9, 5
+; CHECK-NEXT: evlddx 8, 5, 8
+; CHECK-NEXT: evlddx 4, 4, 3
+; CHECK-NEXT: lwz 9, 856(31)
+; CHECK-NEXT: efdcfsi 9, 9
+; CHECK-NEXT: evlddx 5, 5, 12
+; CHECK-NEXT: efdmul 5, 9, 5
+; CHECK-NEXT: evldd 9, 48(31)
+; CHECK-NEXT: efdmul 5, 5, 9
+; CHECK-NEXT: efdmul 4, 8, 4
+; CHECK-NEXT: efdadd 4, 4, 5
+; CHECK-NEXT: lwz 5, 864(31)
+; CHECK-NEXT: lwz 8, 764(31)
+; CHECK-NEXT: lwz 9, 872(31)
+; CHECK-NEXT: mulli 9, 9, 7
+; CHECK-NEXT: add 8, 8, 9
+; CHECK-NEXT: mullw 7, 7, 8
+; CHECK-NEXT: add 6, 6, 7
+; CHECK-NEXT: slwi 6, 6, 4
+; CHECK-NEXT: add 5, 5, 6
+; CHECK-NEXT: evstddx 4, 5, 3
+; CHECK-NEXT: b .LBB1_5
+; CHECK-NEXT: .LBB1_5:
+; CHECK-NEXT: lwz 3, 452(31)
+; CHECK-NEXT: addi 3, 3, 1
+; CHECK-NEXT: stw 3, 452(31)
+; CHECK-NEXT: b .LBB1_3
+; CHECK-NEXT: .LBB1_6:
+; CHECK-NEXT: b .LBB1_7
+; CHECK-NEXT: .LBB1_7:
+; CHECK-NEXT: lwz 3, 764(31)
+; CHECK-NEXT: addi 3, 3, 1
+; CHECK-NEXT: stw 3, 764(31)
+; CHECK-NEXT: b .LBB1_1
+; CHECK-NEXT: .LBB1_8:
+; CHECK-NEXT: lwz 30, 1080(31) # 4-byte Folded Reload
+; CHECK-NEXT: li 3, 984
+; CHECK-NEXT: evlddx 29, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 976
+; CHECK-NEXT: evlddx 28, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 968
+; CHECK-NEXT: evlddx 27, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 960
+; CHECK-NEXT: evlddx 26, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 952
+; CHECK-NEXT: evlddx 25, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 944
+; CHECK-NEXT: evlddx 24, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 936
+; CHECK-NEXT: evlddx 23, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 928
+; CHECK-NEXT: evlddx 22, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 920
+; CHECK-NEXT: evlddx 21, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 912
+; CHECK-NEXT: evlddx 20, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 904
+; CHECK-NEXT: evlddx 19, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 896
+; CHECK-NEXT: evlddx 18, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 888
+; CHECK-NEXT: evlddx 17, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: li 3, 880
+; CHECK-NEXT: evlddx 16, 31, 3 # 8-byte Folded Reload
+; CHECK-NEXT: lwz 15, 1020(31) # 4-byte Folded Reload
+; CHECK-NEXT: lwz 31, 1084(1)
+; CHECK-NEXT: addi 1, 1, 1088
+; CHECK-NEXT: blr
+ %7 = alloca i32, align 4
+ %8 = alloca i32, align 4
+ %9 = alloca ptr, align 4
+ %10 = alloca ptr, align 4
+ %11 = alloca ptr, align 4
+ %12 = alloca i32, align 4
+ %13 = alloca i32, align 4
+ %14 = alloca double, align 8
+ %15 = alloca double, align 8
+ %16 = alloca double, align 8
+ %17 = alloca double, align 8
+ %18 = alloca double, align 8
+ %19 = alloca double, align 8
+ %20 = alloca double, align 8
+ %21 = alloca double, align 8
+ %22 = alloca double, align 8
+ %23 = alloca double, align 8
+ %24 = alloca i32, align 4
+ %25 = alloca %struct.cmplx, align 8
+ %26 = alloca %struct.cmplx, align 8
+ %27 = alloca %struct.cmplx, align 8
+ %28 = alloca %struct.cmplx, align 8
+ %29 = alloca %struct.cmplx, align 8
+ %30 = alloca %struct.cmplx, align 8
+ %31 = alloca %struct.cmplx, align 8
+ %32 = alloca %struct.cmplx, align 8
+ %33 = alloca %struct.cmplx, align 8
+ %34 = alloca %struct.cmplx, align 8
+ %35 = alloca %struct.cmplx, align 8
+ %36 = alloca %struct.cmplx, align 8
+ %37 = alloca %struct.cmplx, align 8
+ %38 = alloca %struct.cmplx, align 8
+ %39 = alloca %struct.cmplx, align 8
+ %40 = alloca %struct.cmplx, align 8
+ %41 = alloca %struct.cmplx, align 8
+ %42 = alloca %struct.cmplx, align 8
+ %43 = alloca %struct.cmplx, align 8
+ %44 = alloca i32, align 4
+ %45 = alloca %struct.cmplx, align 8
+ %46 = alloca %struct.cmplx, align 8
+ %47 = alloca %struct.cmplx, align 8
+ %48 = alloca %struct.cmplx, align 8
+ %49 = alloca %struct.cmplx, align 8
+ %50 = alloca %struct.cmplx, align 8
+ %51 = alloca %struct.cmplx, align 8
+ %52 = alloca %struct.cmplx, align 8
+ %53 = alloca %struct.cmplx, align 8
+ %54 = alloca %struct.cmplx, align 8
+ %55 = alloca %struct.cmplx, align 8
+ %56 = alloca %struct.cmplx, align 8
+ %57 = alloca %struct.cmplx, align 8
+ %58 = alloca %struct.cmplx, align 8
+ %59 = alloca %struct.cmplx, align 8
+ %60 = alloca %struct.cmplx, align 8
+ %61 = alloca %struct.cmplx, align 8
+ %62 = alloca %struct.cmplx, align 8
+ %63 = alloca %struct.cmplx, align 8
+ %64 = alloca %struct.cmplx, align 8
+ %65 = alloca %struct.cmplx, align 8
+ %66 = alloca %struct.cmplx, align 8
+ %67 = alloca %struct.cmplx, align 8
+ %68 = alloca %struct.cmplx, align 8
+ %69 = alloca %struct.cmplx, align 8
+ %70 = alloca %struct.cmplx, align 8
+ %71 = alloca %struct.cmplx, align 8
+ store i32 %0, ptr %7, align 4
+ store i32 %1, ptr %8, align 4
+ store ptr %2, ptr %9, align 4
+ store ptr %3, ptr %10, align 4
+ store ptr %4, ptr %11, align 4
+ store i32 %5, ptr %12, align 4
+ store i32 11, ptr %13, align 4
+ store double 0x3FEAEB8C8764F0BA, ptr %14, align 8
+ %72 = load i32, ptr %12, align 4
+ %73 = sitofp i32 %72 to double
+ %74 = fmul double %73, 0x3FE14CEDF8BB580B
+ store double %74, ptr %15, align 8
+ store double 0x3FDA9628D9C712B6, ptr %16, align 8
+ %75 = load i32, ptr %12, align 4
+ %76 = sitofp i32 %75 to double
+ %77 = fmul double %76, 0x3FED1BB48EEE2C13
+ store double %77, ptr %17, align 8
+ store double 0xBFC2375F640F44DB, ptr %18, align 8
+ %78 = load i32, ptr %12, align 4
+ %79 = sitofp i32 %78 to double
+ %80 = fmul double %79, 0x3FEFAC9E043842EF
+ store double %80, ptr %19, align 8
+ store double 0xBFE4F49E7F775887, ptr %20, align 8
+ %81 = load i32, ptr %12, align 4
+ %82 = sitofp i32 %81 to double
+ %83 = fmul double %82, 0x3FE82F19BB3A28A1
+ store double %83, ptr %21, align 8
+ store double 0xBFEEB42A9BCD5057, ptr %22, align 8
+ %84 = load i32, ptr %12, align 4
+ %85 = sitofp i32 %84 to double
+ %86 = fmul double %85, 0x3FD207E7FD768DBF
+ store double %86, ptr %23, align 8
+ store i32 0, ptr %24, align 4
+ br label %87
+
+87: ; preds = %2792, %6
+ %88 = load i32, ptr %24, align 4
+ %89 = load i32, ptr %8, align 4
+ %90 = icmp ult i32 %88, %89
+ br i1 %90, label %91, label %2795
+
+91: ; preds = %87
+ %92 = load ptr, ptr %9, align 4
+ %93 = load i32, ptr %7, align 4
+ %94 = load i32, ptr %24, align 4
+ %95 = mul i32 11, %94
+ %96 = add i32 0, %95
+ %97 = mul i32 %93, %96
+ %98 = add i32 0, %97
+ %99 = getelementptr inbounds %struct.cmplx, ptr %92, i32 %98
+ call void @llvm.memcpy.p0.p0.i32(ptr align 8 %25, ptr align 8 %99, i32 16, i1 false)
+ %100 = load ptr, ptr %9, align 4
+ %101 = load i32, ptr %7, align 4
+ %102 = load i32, ptr %24, align 4
+ %103 = mul i32 11, %102
+ %104 = add i32 1, %103
+ %105 = mul i32 %101, %104
+ %106 = add i32 0, %105
+ %107 = getelementptr inbounds %struct.cmplx, ptr %100, i32 %106
+ %108 = getelementptr inbounds %struct.cmplx, ptr %107, i32 0, i32 0
+ %109 = load double, ptr %108, align 8
+ %110 = load ptr, ptr %9, align 4
+ %111 = load i32, ptr %7, align 4
+ %112 = load i32, ptr %24, align 4
+ %113 = mul i32 11, %112
+ %114 = add i32 10, %113
+ %115 = mul i32 %111, %114
+ %116 = add i32 0, %115
+ %117 = getelementptr inbounds %struct.cmplx, ptr %110, i32 %116
+ %118 = getelementptr inbounds %struct.cmplx, ptr %117, i32 0, i32 0
+ %119 = load double, ptr %118, align 8
+ %120 = fadd double %109, %119
+ %121 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 0
+ store double %120, ptr %121, align 8
+ %122 = load ptr, ptr %9, align 4
+ %123 = load i32, ptr %7, align 4
+ %124 = load i32, ptr %24, align 4
+ %125 = mul i32 11, %124
+ %126 = add i32 1, %125
+ %127 = mul i32 %123, %126
+ %128 = add i32 0, %127
+ %129 = getelementptr inbounds %struct.cmplx, ptr %122, i32 %128
+ %130 = getelementptr inbounds %struct.cmplx, ptr %129, i32 0, i32 1
+ %131 = load double, ptr %130, align 8
+ %132 = load ptr, ptr %9, align 4
+ %133 = load i32, ptr %7, align 4
+ %134 = load i32, ptr %24, align 4
+ %135 = mul i32 11, %134
+ %136 = add i32 10, %135
+ %137 = mul i32 %133, %136
+ %138 = add i32 0, %137
+ %139 = getelementptr inbounds %struct.cmplx, ptr %132, i32 %138
+ %140 = getelementptr inbounds %struct.cmplx, ptr %139, i32 0, i32 1
+ %141 = load double, ptr %140, align 8
+ %142 = fadd double %131, %141
+ %143 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 1
+ store double %142, ptr %143, align 8
+ %144 = load ptr, ptr %9, align 4
+ %145 = load i32, ptr %7, align 4
+ %146 = load i32, ptr %24, align 4
+ %147 = mul i32 11, %146
+ %148 = add i32 1, %147
+ %149 = mul i32 %145, %148
+ %150 = add i32 0, %149
+ %151 = getelementptr inbounds %struct.cmplx, ptr %144, i32 %150
+ %152 = getelementptr inbounds %struct.cmplx, ptr %151, i32 0, i32 0
+ %153 = load double, ptr %152, align 8
+ %154 = load ptr, ptr %9, align 4
+ %155 = load i32, ptr %7, align 4
+ %156 = load i32, ptr %24, align 4
+ %157 = mul i32 11, %156
+ %158 = add i32 10, %157
+ %159 = mul i32 %155, %158
+ %160 = add i32 0, %159
+ %161 = getelementptr inbounds %struct.cmplx, ptr %154, i32 %160
+ %162 = getelementptr inbounds %struct.cmplx, ptr %161, i32 0, i32 0
+ %163 = load double, ptr %162, align 8
+ %164 = fsub double %153, %163
+ %165 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 0
+ store double %164, ptr %165, align 8
+ %166 = load ptr, ptr %9, align 4
+ %167 = load i32, ptr %7, align 4
+ %168 = load i32, ptr %24, align 4
+ %169 = mul i32 11, %168
+ %170 = add i32 1, %169
+ %171 = mul i32 %167, %170
+ %172 = add i32 0, %171
+ %173 = getelementptr inbounds %struct.cmplx, ptr %166, i32 %172
+ %174 = getelementptr inbounds %struct.cmplx, ptr %173, i32 0, i32 1
+ %175 = load double, ptr %174, align 8
+ %176 = load ptr, ptr %9, align 4
+ %177 = load i32, ptr %7, align 4
+ %178 = load i32, ptr %24, align 4
+ %179 = mul i32 11, %178
+ %180 = add i32 10, %179
+ %181 = mul i32 %177, %180
+ %182 = add i32 0, %181
+ %183 = getelementptr inbounds %struct.cmplx, ptr %176, i32 %182
+ %184 = getelementptr inbounds %struct.cmplx, ptr %183, i32 0, i32 1
+ %185 = load double, ptr %184, align 8
+ %186 = fsub double %175, %185
+ %187 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 1
+ store double %186, ptr %187, align 8
+ %188 = load ptr, ptr %9, align 4
+ %189 = load i32, ptr %7, align 4
+ %190 = load i32, ptr %24, align 4
+ %191 = mul i32 11, %190
+ %192 = add i32 2, %191
+ %193 = mul i32 %189, %192
+ %194 = add i32 0, %193
+ %195 = getelementptr inbounds %struct.cmplx, ptr %188, i32 %194
+ %196 = getelementptr inbounds %struct.cmplx, ptr %195, i32 0, i32 0
+ %197 = load double, ptr %196, align 8
+ %198 = load ptr, ptr %9, align 4
+ %199 = load i32, ptr %7, align 4
+ %200 = load i32, ptr %24, align 4
+ %201 = mul i32 11, %200
+ %202 = add i32 9, %201
+ %203 = mul i32 %199, %202
+ %204 = add i32 0, %203
+ %205 = getelementptr inbounds %struct.cmplx, ptr %198, i32 %204
+ %206 = getelementptr inbounds %struct.cmplx, ptr %205, i32 0, i32 0
+ %207 = load double, ptr %206, align 8
+ %208 = fadd double %197, %207
+ %209 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 0
+ store double %208, ptr %209, align 8
+ %210 = load ptr, ptr %9, align 4
+ %211 = load i32, ptr %7, align 4
+ %212 = load i32, ptr %24, align 4
+ %213 = mul i32 11, %212
+ %214 = add i32 2, %213
+ %215 = mul i32 %211, %214
+ %216 = add i32 0, %215
+ %217 = getelementptr inbounds %struct.cmplx, ptr %210, i32 %216
+ %218 = getelementptr inbounds %struct.cmplx, ptr %217, i32 0, i32 1
+ %219 = load double, ptr %218, align 8
+ %220 = load ptr, ptr %9, align 4
+ %221 = load i32, ptr %7, align 4
+ %222 = load i32, ptr %24, align 4
+ %223 = mul i32 11, %222
+ %224 = add i32 9, %223
+ %225 = mul i32 %221, %224
+ %226 = add i32 0, %225
+ %227 = getelementptr inbounds %struct.cmplx, ptr %220, i32 %226
+ %228 = getelementptr inbounds %struct.cmplx, ptr %227, i32 0, i32 1
+ %229 = load double, ptr %228, align 8
+ %230 = fadd double %219, %229
+ %231 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 1
+ store double %230, ptr %231, align 8
+ %232 = load ptr, ptr %9, align 4
+ %233 = load i32, ptr %7, align 4
+ %234 = load i32, ptr %24, align 4
+ %235 = mul i32 11, %234
+ %236 = add i32 2, %235
+ %237 = mul i32 %233, %236
+ %238 = add i32 0, %237
+ %239 = getelementptr inbounds %struct.cmplx, ptr %232, i32 %238
+ %240 = getelementptr inbounds %struct.cmplx, ptr %239, i32 0, i32 0
+ %241 = load double, ptr %240, align 8
+ %242 = load ptr, ptr %9, align 4
+ %243 = load i32, ptr %7, align 4
+ %244 = load i32, ptr %24, align 4
+ %245 = mul i32 11, %244
+ %246 = add i32 9, %245
+ %247 = mul i32 %243, %246
+ %248 = add i32 0, %247
+ %249 = getelementptr inbounds %struct.cmplx, ptr %242, i32 %248
+ %250 = getelementptr inbounds %struct.cmplx, ptr %249, i32 0, i32 0
+ %251 = load double, ptr %250, align 8
+ %252 = fsub double %241, %251
+ %253 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 0
+ store double %252, ptr %253, align 8
+ %254 = load ptr, ptr %9, align 4
+ %255 = load i32, ptr %7, align 4
+ %256 = load i32, ptr %24, align 4
+ %257 = mul i32 11, %256
+ %258 = add i32 2, %257
+ %259 = mul i32 %255, %258
+ %260 = add i32 0, %259
+ %261 = getelementptr inbounds %struct.cmplx, ptr %254, i32 %260
+ %262 = getelementptr inbounds %struct.cmplx, ptr %261, i32 0, i32 1
+ %263 = load double, ptr %262, align 8
+ %264 = load ptr, ptr %9, align 4
+ %265 = load i32, ptr %7, align 4
+ %266 = load i32, ptr %24, align 4
+ %267 = mul i32 11, %266
+ %268 = add i32 9, %267
+ %269 = mul i32 %265, %268
+ %270 = add i32 0, %269
+ %271 = getelementptr inbounds %struct.cmplx, ptr %264, i32 %270
+ %272 = getelementptr inbounds %struct.cmplx, ptr %271, i32 0, i32 1
+ %273 = load double, ptr %272, align 8
+ %274 = fsub double %263, %273
+ %275 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 1
+ store double %274, ptr %275, align 8
+ %276 = load ptr, ptr %9, align 4
+ %277 = load i32, ptr %7, align 4
+ %278 = load i32, ptr %24, align 4
+ %279 = mul i32 11, %278
+ %280 = add i32 3, %279
+ %281 = mul i32 %277, %280
+ %282 = add i32 0, %281
+ %283 = getelementptr inbounds %struct.cmplx, ptr %276, i32 %282
+ %284 = getelementptr inbounds %struct.cmplx, ptr %283, i32 0, i32 0
+ %285 = load double, ptr %284, align 8
+ %286 = load ptr, ptr %9, align 4
+ %287 = load i32, ptr %7, align 4
+ %288 = load i32, ptr %24, align 4
+ %289 = mul i32 11, %288
+ %290 = add i32 8, %289
+ %291 = mul i32 %287, %290
+ %292 = add i32 0, %291
+ %293 = getelementptr inbounds %struct.cmplx, ptr %286, i32 %292
+ %294 = getelementptr inbounds %struct.cmplx, ptr %293, i32 0, i32 0
+ %295 = load double, ptr %294, align 8
+ %296 = fadd double %285, %295
+ %297 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 0
+ store double %296, ptr %297, align 8
+ %298 = load ptr, ptr %9, align 4
+ %299 = load i32, ptr %7, align 4
+ %300 = load i32, ptr %24, align 4
+ %301 = mul i32 11, %300
+ %302 = add i32 3, %301
+ %303 = mul i32 %299, %302
+ %304 = add i32 0, %303
+ %305 = getelementptr inbounds %struct.cmplx, ptr %298, i32 %304
+ %306 = getelementptr inbounds %struct.cmplx, ptr %305, i32 0, i32 1
+ %307 = load double, ptr %306, align 8
+ %308 = load ptr, ptr %9, align 4
+ %309 = load i32, ptr %7, align 4
+ %310 = load i32, ptr %24, align 4
+ %311 = mul i32 11, %310
+ %312 = add i32 8, %311
+ %313 = mul i32 %309, %312
+ %314 = add i32 0, %313
+ %315 = getelementptr inbounds %struct.cmplx, ptr %308, i32 %314
+ %316 = getelementptr inbounds %struct.cmplx, ptr %315, i32 0, i32 1
+ %317 = load double, ptr %316, align 8
+ %318 = fadd double %307, %317
+ %319 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 1
+ store double %318, ptr %319, align 8
+ %320 = load ptr, ptr %9, align 4
+ %321 = load i32, ptr %7, align 4
+ %322 = load i32, ptr %24, align 4
+ %323 = mul i32 11, %322
+ %324 = add i32 3, %323
+ %325 = mul i32 %321, %324
+ %326 = add i32 0, %325
+ %327 = getelementptr inbounds %struct.cmplx, ptr %320, i32 %326
+ %328 = getelementptr inbounds %struct.cmplx, ptr %327, i32 0, i32 0
+ %329 = load double, ptr %328, align 8
+ %330 = load ptr, ptr %9, align 4
+ %331 = load i32, ptr %7, align 4
+ %332 = load i32, ptr %24, align 4
+ %333 = mul i32 11, %332
+ %334 = add i32 8, %333
+ %335 = mul i32 %331, %334
+ %336 = add i32 0, %335
+ %337 = getelementptr inbounds %struct.cmplx, ptr %330, i32 %336
+ %338 = getelementptr inbounds %struct.cmplx, ptr %337, i32 0, i32 0
+ %339 = load double, ptr %338, align 8
+ %340 = fsub double %329, %339
+ %341 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 0
+ store double %340, ptr %341, align 8
+ %342 = load ptr, ptr %9, align 4
+ %343 = load i32, ptr %7, align 4
+ %344 = load i32, ptr %24, align 4
+ %345 = mul i32 11, %344
+ %346 = add i32 3, %345
+ %347 = mul i32 %343, %346
+ %348 = add i32 0, %347
+ %349 = getelementptr inbounds %struct.cmplx, ptr %342, i32 %348
+ %350 = getelementptr inbounds %struct.cmplx, ptr %349, i32 0, i32 1
+ %351 = load double, ptr %350, align 8
+ %352 = load ptr, ptr %9, align 4
+ %353 = load i32, ptr %7, align 4
+ %354 = load i32, ptr %24, align 4
+ %355 = mul i32 11, %354
+ %356 = add i32 8, %355
+ %357 = mul i32 %353, %356
+ %358 = add i32 0, %357
+ %359 = getelementptr inbounds %struct.cmplx, ptr %352, i32 %358
+ %360 = getelementptr inbounds %struct.cmplx, ptr %359, i32 0, i32 1
+ %361 = load double, ptr %360, align 8
+ %362 = fsub double %351, %361
+ %363 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 1
+ store double %362, ptr %363, align 8
+ %364 = load ptr, ptr %9, align 4
+ %365 = load i32, ptr %7, align 4
+ %366 = load i32, ptr %24, align 4
+ %367 = mul i32 11, %366
+ %368 = add i32 4, %367
+ %369 = mul i32 %365, %368
+ %370 = add i32 0, %369
+ %371 = getelementptr inbounds %struct.cmplx, ptr %364, i32 %370
+ %372 = getelementptr inbounds %struct.cmplx, ptr %371, i32 0, i32 0
+ %373 = load double, ptr %372, align 8
+ %374 = load ptr, ptr %9, align 4
+ %375 = load i32, ptr %7, align 4
+ %376 = load i32, ptr %24, align 4
+ %377 = mul i32 11, %376
+ %378 = add i32 7, %377
+ %379 = mul i32 %375, %378
+ %380 = add i32 0, %379
+ %381 = getelementptr inbounds %struct.cmplx, ptr %374, i32 %380
+ %382 = getelementptr inbounds %struct.cmplx, ptr %381, i32 0, i32 0
+ %383 = load double, ptr %382, align 8
+ %384 = fadd double %373, %383
+ %385 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 0
+ store double %384, ptr %385, align 8
+ %386 = load ptr, ptr %9, align 4
+ %387 = load i32, ptr %7, align 4
+ %388 = load i32, ptr %24, align 4
+ %389 = mul i32 11, %388
+ %390 = add i32 4, %389
+ %391 = mul i32 %387, %390
+ %392 = add i32 0, %391
+ %393 = getelementptr inbounds %struct.cmplx, ptr %386, i32 %392
+ %394 = getelementptr inbounds %struct.cmplx, ptr %393, i32 0, i32 1
+ %395 = load double, ptr %394, align 8
+ %396 = load ptr, ptr %9, align 4
+ %397 = load i32, ptr %7, align 4
+ %398 = load i32, ptr %24, align 4
+ %399 = mul i32 11, %398
+ %400 = add i32 7, %399
+ %401 = mul i32 %397, %400
+ %402 = add i32 0, %401
+ %403 = getelementptr inbounds %struct.cmplx, ptr %396, i32 %402
+ %404 = getelementptr inbounds %struct.cmplx, ptr %403, i32 0, i32 1
+ %405 = load double, ptr %404, align 8
+ %406 = fadd double %395, %405
+ %407 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 1
+ store double %406, ptr %407, align 8
+ %408 = load ptr, ptr %9, align 4
+ %409 = load i32, ptr %7, align 4
+ %410 = load i32, ptr %24, align 4
+ %411 = mul i32 11, %410
+ %412 = add i32 4, %411
+ %413 = mul i32 %409, %412
+ %414 = add i32 0, %413
+ %415 = getelementptr inbounds %struct.cmplx, ptr %408, i32 %414
+ %416 = getelementptr inbounds %struct.cmplx, ptr %415, i32 0, i32 0
+ %417 = load double, ptr %416, align 8
+ %418 = load ptr, ptr %9, align 4
+ %419 = load i32, ptr %7, align 4
+ %420 = load i32, ptr %24, align 4
+ %421 = mul i32 11, %420
+ %422 = add i32 7, %421
+ %423 = mul i32 %419, %422
+ %424 = add i32 0, %423
+ %425 = getelementptr inbounds %struct.cmplx, ptr %418, i32 %424
+ %426 = getelementptr inbounds %struct.cmplx, ptr %425, i32 0, i32 0
+ %427 = load double, ptr %426, align 8
+ %428 = fsub double %417, %427
+ %429 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 0
+ store double %428, ptr %429, align 8
+ %430 = load ptr, ptr %9, align 4
+ %431 = load i32, ptr %7, align 4
+ %432 = load i32, ptr %24, align 4
+ %433 = mul i32 11, %432
+ %434 = add i32 4, %433
+ %435 = mul i32 %431, %434
+ %436 = add i32 0, %435
+ %437 = getelementptr inbounds %struct.cmplx, ptr %430, i32 %436
+ %438 = getelementptr inbounds %struct.cmplx, ptr %437, i32 0, i32 1
+ %439 = load double, ptr %438, align 8
+ %440 = load ptr, ptr %9, align 4
+ %441 = load i32, ptr %7, align 4
+ %442 = load i32, ptr %24, align 4
+ %443 = mul i32 11, %442
+ %444 = add i32 7, %443
+ %445 = mul i32 %441, %444
+ %446 = add i32 0, %445
+ %447 = getelementptr inbounds %struct.cmplx, ptr %440, i32 %446
+ %448 = getelementptr inbounds %struct.cmplx, ptr %447, i32 0, i32 1
+ %449 = load double, ptr %448, align 8
+ %450 = fsub double %439, %449
+ %451 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 1
+ store double %450, ptr %451, align 8
+ %452 = load ptr, ptr %9, align 4
+ %453 = load i32, ptr %7, align 4
+ %454 = load i32, ptr %24, align 4
+ %455 = mul i32 11, %454
+ %456 = add i32 5, %455
+ %457 = mul i32 %453, %456
+ %458 = add i32 0, %457
+ %459 = getelementptr inbounds %struct.cmplx, ptr %452, i32 %458
+ %460 = getelementptr inbounds %struct.cmplx, ptr %459, i32 0, i32 0
+ %461 = load double, ptr %460, align 8
+ %462 = load ptr, ptr %9, align 4
+ %463 = load i32, ptr %7, align 4
+ %464 = load i32, ptr %24, align 4
+ %465 = mul i32 11, %464
+ %466 = add i32 6, %465
+ %467 = mul i32 %463, %466
+ %468 = add i32 0, %467
+ %469 = getelementptr inbounds %struct.cmplx, ptr %462, i32 %468
+ %470 = getelementptr inbounds %struct.cmplx, ptr %469, i32 0, i32 0
+ %471 = load double, ptr %470, align 8
+ %472 = fadd double %461, %471
+ %473 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 0
+ store double %472, ptr %473, align 8
+ %474 = load ptr, ptr %9, align 4
+ %475 = load i32, ptr %7, align 4
+ %476 = load i32, ptr %24, align 4
+ %477 = mul i32 11, %476
+ %478 = add i32 5, %477
+ %479 = mul i32 %475, %478
+ %480 = add i32 0, %479
+ %481 = getelementptr inbounds %struct.cmplx, ptr %474, i32 %480
+ %482 = getelementptr inbounds %struct.cmplx, ptr %481, i32 0, i32 1
+ %483 = load double, ptr %482, align 8
+ %484 = load ptr, ptr %9, align 4
+ %485 = load i32, ptr %7, align 4
+ %486 = load i32, ptr %24, align 4
+ %487 = mul i32 11, %486
+ %488 = add i32 6, %487
+ %489 = mul i32 %485, %488
+ %490 = add i32 0, %489
+ %491 = getelementptr inbounds %struct.cmplx, ptr %484, i32 %490
+ %492 = getelementptr inbounds %struct.cmplx, ptr %491, i32 0, i32 1
+ %493 = load double, ptr %492, align 8
+ %494 = fadd double %483, %493
+ %495 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 1
+ store double %494, ptr %495, align 8
+ %496 = load ptr, ptr %9, align 4
+ %497 = load i32, ptr %7, align 4
+ %498 = load i32, ptr %24, align 4
+ %499 = mul i32 11, %498
+ %500 = add i32 5, %499
+ %501 = mul i32 %497, %500
+ %502 = add i32 0, %501
+ %503 = getelementptr inbounds %struct.cmplx, ptr %496, i32 %502
+ %504 = getelementptr inbounds %struct.cmplx, ptr %503, i32 0, i32 0
+ %505 = load double, ptr %504, align 8
+ %506 = load ptr, ptr %9, align 4
+ %507 = load i32, ptr %7, align 4
+ %508 = load i32, ptr %24, align 4
+ %509 = mul i32 11, %508
+ %510 = add i32 6, %509
+ %511 = mul i32 %507, %510
+ %512 = add i32 0, %511
+ %513 = getelementptr inbounds %struct.cmplx, ptr %506, i32 %512
+ %514 = getelementptr inbounds %struct.cmplx, ptr %513, i32 0, i32 0
+ %515 = load double, ptr %514, align 8
+ %516 = fsub double %505, %515
+ %517 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 0
+ store double %516, ptr %517, align 8
+ %518 = load ptr, ptr %9, align 4
+ %519 = load i32, ptr %7, align 4
+ %520 = load i32, ptr %24, align 4
+ %521 = mul i32 11, %520
+ %522 = add i32 5, %521
+ %523 = mul i32 %519, %522
+ %524 = add i32 0, %523
+ %525 = getelementptr inbounds %struct.cmplx, ptr %518, i32 %524
+ %526 = getelementptr inbounds %struct.cmplx, ptr %525, i32 0, i32 1
+ %527 = load double, ptr %526, align 8
+ %528 = load ptr, ptr %9, align 4
+ %529 = load i32, ptr %7, align 4
+ %530 = load i32, ptr %24, align 4
+ %531 = mul i32 11, %530
+ %532 = add i32 6, %531
+ %533 = mul i32 %529, %532
+ %534 = add i32 0, %533
+ %535 = getelementptr inbounds %struct.cmplx, ptr %528, i32 %534
+ %536 = getelementptr inbounds %struct.cmplx, ptr %535, i32 0, i32 1
+ %537 = load double, ptr %536, align 8
+ %538 = fsub double %527, %537
+ %539 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 1
+ store double %538, ptr %539, align 8
+ %540 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 0
+ %541 = load double, ptr %540, align 8
+ %542 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 0
+ %543 = load double, ptr %542, align 8
+ %544 = fadd double %541, %543
+ %545 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 0
+ %546 = load double, ptr %545, align 8
+ %547 = fadd double %544, %546
+ %548 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 0
+ %549 = load double, ptr %548, align 8
+ %550 = fadd double %547, %549
+ %551 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 0
+ %552 = load double, ptr %551, align 8
+ %553 = fadd double %550, %552
+ %554 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 0
+ %555 = load double, ptr %554, align 8
+ %556 = fadd double %553, %555
+ %557 = load ptr, ptr %10, align 4
+ %558 = load i32, ptr %7, align 4
+ %559 = load i32, ptr %24, align 4
+ %560 = load i32, ptr %8, align 4
+ %561 = mul i32 %560, 0
+ %562 = add i32 %559, %561
+ %563 = mul i32 %558, %562
+ %564 = add i32 0, %563
+ %565 = getelementptr inbounds %struct.cmplx, ptr %557, i32 %564
+ %566 = getelementptr inbounds %struct.cmplx, ptr %565, i32 0, i32 0
+ store double %556, ptr %566, align 8
+ %567 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 1
+ %568 = load double, ptr %567, align 8
+ %569 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 1
+ %570 = load double, ptr %569, align 8
+ %571 = fadd double %568, %570
+ %572 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 1
+ %573 = load double, ptr %572, align 8
+ %574 = fadd double %571, %573
+ %575 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 1
+ %576 = load double, ptr %575, align 8
+ %577 = fadd double %574, %576
+ %578 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 1
+ %579 = load double, ptr %578, align 8
+ %580 = fadd double %577, %579
+ %581 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 1
+ %582 = load double, ptr %581, align 8
+ %583 = fadd double %580, %582
+ %584 = load ptr, ptr %10, align 4
+ %585 = load i32, ptr %7, align 4
+ %586 = load i32, ptr %24, align 4
+ %587 = load i32, ptr %8, align 4
+ %588 = mul i32 %587, 0
+ %589 = add i32 %586, %588
+ %590 = mul i32 %585, %589
+ %591 = add i32 0, %590
+ %592 = getelementptr inbounds %struct.cmplx, ptr %584, i32 %591
+ %593 = getelementptr inbounds %struct.cmplx, ptr %592, i32 0, i32 1
+ store double %583, ptr %593, align 8
+ %594 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 0
+ %595 = load double, ptr %594, align 8
+ %596 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 0
+ %597 = load double, ptr %596, align 8
+ %598 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %597, double %595)
+ %599 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 0
+ %600 = load double, ptr %599, align 8
+ %601 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %600, double %598)
+ %602 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 0
+ %603 = load double, ptr %602, align 8
+ %604 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %603, double %601)
+ %605 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 0
+ %606 = load double, ptr %605, align 8
+ %607 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %606, double %604)
+ %608 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 0
+ %609 = load double, ptr %608, align 8
+ %610 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %609, double %607)
+ %611 = getelementptr inbounds %struct.cmplx, ptr %36, i32 0, i32 0
+ store double %610, ptr %611, align 8
+ %612 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 1
+ %613 = load double, ptr %612, align 8
+ %614 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 1
+ %615 = load double, ptr %614, align 8
+ %616 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %615, double %613)
+ %617 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 1
+ %618 = load double, ptr %617, align 8
+ %619 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %618, double %616)
+ %620 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 1
+ %621 = load double, ptr %620, align 8
+ %622 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %621, double %619)
+ %623 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 1
+ %624 = load double, ptr %623, align 8
+ %625 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %624, double %622)
+ %626 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 1
+ %627 = load double, ptr %626, align 8
+ %628 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %627, double %625)
+ %629 = getelementptr inbounds %struct.cmplx, ptr %36, i32 0, i32 1
+ store double %628, ptr %629, align 8
+ %630 = load double, ptr %15, align 8
+ %631 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 0
+ %632 = load double, ptr %631, align 8
+ %633 = load double, ptr %17, align 8
+ %634 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 0
+ %635 = load double, ptr %634, align 8
+ %636 = fmul double %633, %635
+ %637 = call double @llvm.fmuladd.f64(double %630, double %632, double %636)
+ %638 = load double, ptr %19, align 8
+ %639 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 0
+ %640 = load double, ptr %639, align 8
+ %641 = call double @llvm.fmuladd.f64(double %638, double %640, double %637)
+ %642 = load double, ptr %21, align 8
+ %643 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 0
+ %644 = load double, ptr %643, align 8
+ %645 = call double @llvm.fmuladd.f64(double %642, double %644, double %641)
+ %646 = load double, ptr %23, align 8
+ %647 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 0
+ %648 = load double, ptr %647, align 8
+ %649 = call double @llvm.fmuladd.f64(double %646, double %648, double %645)
+ %650 = getelementptr inbounds %struct.cmplx, ptr %37, i32 0, i32 1
+ store double %649, ptr %650, align 8
+ %651 = load double, ptr %15, align 8
+ %652 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 1
+ %653 = load double, ptr %652, align 8
+ %654 = load double, ptr %17, align 8
+ %655 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 1
+ %656 = load double, ptr %655, align 8
+ %657 = fmul double %654, %656
+ %658 = call double @llvm.fmuladd.f64(double %651, double %653, double %657)
+ %659 = load double, ptr %19, align 8
+ %660 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 1
+ %661 = load double, ptr %660, align 8
+ %662 = call double @llvm.fmuladd.f64(double %659, double %661, double %658)
+ %663 = load double, ptr %21, align 8
+ %664 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 1
+ %665 = load double, ptr %664, align 8
+ %666 = call double @llvm.fmuladd.f64(double %663, double %665, double %662)
+ %667 = load double, ptr %23, align 8
+ %668 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 1
+ %669 = load double, ptr %668, align 8
+ %670 = call double @llvm.fmuladd.f64(double %667, double %669, double %666)
+ %671 = fneg double %670
+ %672 = getelementptr inbounds %struct.cmplx, ptr %37, i32 0, i32 0
+ store double %671, ptr %672, align 8
+ %673 = getelementptr inbounds %struct.cmplx, ptr %36, i32 0, i32 0
+ %674 = load double, ptr %673, align 8
+ %675 = getelementptr inbounds %struct.cmplx, ptr %37, i32 0, i32 0
+ %676 = load double, ptr %675, align 8
+ %677 = fadd double %674, %676
+ %678 = load ptr, ptr %10, align 4
+ %679 = load i32, ptr %7, align 4
+ %680 = load i32, ptr %24, align 4
+ %681 = load i32, ptr %8, align 4
+ %682 = mul i32 %681, 1
+ %683 = add i32 %680, %682
+ %684 = mul i32 %679, %683
+ %685 = add i32 0, %684
+ %686 = getelementptr inbounds %struct.cmplx, ptr %678, i32 %685
+ %687 = getelementptr inbounds %struct.cmplx, ptr %686, i32 0, i32 0
+ store double %677, ptr %687, align 8
+ %688 = getelementptr inbounds %struct.cmplx, ptr %36, i32 0, i32 1
+ %689 = load double, ptr %688, align 8
+ %690 = getelementptr inbounds %struct.cmplx, ptr %37, i32 0, i32 1
+ %691 = load double, ptr %690, align 8
+ %692 = fadd double %689, %691
+ %693 = load ptr, ptr %10, align 4
+ %694 = load i32, ptr %7, align 4
+ %695 = load i32, ptr %24, align 4
+ %696 = load i32, ptr %8, align 4
+ %697 = mul i32 %696, 1
+ %698 = add i32 %695, %697
+ %699 = mul i32 %694, %698
+ %700 = add i32 0, %699
+ %701 = getelementptr inbounds %struct.cmplx, ptr %693, i32 %700
+ %702 = getelementptr inbounds %struct.cmplx, ptr %701, i32 0, i32 1
+ store double %692, ptr %702, align 8
+ %703 = getelementptr inbounds %struct.cmplx, ptr %36, i32 0, i32 0
+ %704 = load double, ptr %703, align 8
+ %705 = getelementptr inbounds %struct.cmplx, ptr %37, i32 0, i32 0
+ %706 = load double, ptr %705, align 8
+ %707 = fsub double %704, %706
+ %708 = load ptr, ptr %10, align 4
+ %709 = load i32, ptr %7, align 4
+ %710 = load i32, ptr %24, align 4
+ %711 = load i32, ptr %8, align 4
+ %712 = mul i32 %711, 10
+ %713 = add i32 %710, %712
+ %714 = mul i32 %709, %713
+ %715 = add i32 0, %714
+ %716 = getelementptr inbounds %struct.cmplx, ptr %708, i32 %715
+ %717 = getelementptr inbounds %struct.cmplx, ptr %716, i32 0, i32 0
+ store double %707, ptr %717, align 8
+ %718 = getelementptr inbounds %struct.cmplx, ptr %36, i32 0, i32 1
+ %719 = load double, ptr %718, align 8
+ %720 = getelementptr inbounds %struct.cmplx, ptr %37, i32 0, i32 1
+ %721 = load double, ptr %720, align 8
+ %722 = fsub double %719, %721
+ %723 = load ptr, ptr %10, align 4
+ %724 = load i32, ptr %7, align 4
+ %725 = load i32, ptr %24, align 4
+ %726 = load i32, ptr %8, align 4
+ %727 = mul i32 %726, 10
+ %728 = add i32 %725, %727
+ %729 = mul i32 %724, %728
+ %730 = add i32 0, %729
+ %731 = getelementptr inbounds %struct.cmplx, ptr %723, i32 %730
+ %732 = getelementptr inbounds %struct.cmplx, ptr %731, i32 0, i32 1
+ store double %722, ptr %732, align 8
+ %733 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 0
+ %734 = load double, ptr %733, align 8
+ %735 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 0
+ %736 = load double, ptr %735, align 8
+ %737 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %736, double %734)
+ %738 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 0
+ %739 = load double, ptr %738, align 8
+ %740 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %739, double %737)
+ %741 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 0
+ %742 = load double, ptr %741, align 8
+ %743 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %742, double %740)
+ %744 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 0
+ %745 = load double, ptr %744, align 8
+ %746 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %745, double %743)
+ %747 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 0
+ %748 = load double, ptr %747, align 8
+ %749 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %748, double %746)
+ %750 = getelementptr inbounds %struct.cmplx, ptr %38, i32 0, i32 0
+ store double %749, ptr %750, align 8
+ %751 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 1
+ %752 = load double, ptr %751, align 8
+ %753 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 1
+ %754 = load double, ptr %753, align 8
+ %755 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %754, double %752)
+ %756 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 1
+ %757 = load double, ptr %756, align 8
+ %758 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %757, double %755)
+ %759 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 1
+ %760 = load double, ptr %759, align 8
+ %761 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %760, double %758)
+ %762 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 1
+ %763 = load double, ptr %762, align 8
+ %764 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %763, double %761)
+ %765 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 1
+ %766 = load double, ptr %765, align 8
+ %767 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %766, double %764)
+ %768 = getelementptr inbounds %struct.cmplx, ptr %38, i32 0, i32 1
+ store double %767, ptr %768, align 8
+ %769 = load double, ptr %17, align 8
+ %770 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 0
+ %771 = load double, ptr %770, align 8
+ %772 = load double, ptr %21, align 8
+ %773 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 0
+ %774 = load double, ptr %773, align 8
+ %775 = fmul double %772, %774
+ %776 = call double @llvm.fmuladd.f64(double %769, double %771, double %775)
+ %777 = load double, ptr %23, align 8
+ %778 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 0
+ %779 = load double, ptr %778, align 8
+ %780 = fneg double %777
+ %781 = call double @llvm.fmuladd.f64(double %780, double %779, double %776)
+ %782 = load double, ptr %19, align 8
+ %783 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 0
+ %784 = load double, ptr %783, align 8
+ %785 = fneg double %782
+ %786 = call double @llvm.fmuladd.f64(double %785, double %784, double %781)
+ %787 = load double, ptr %15, align 8
+ %788 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 0
+ %789 = load double, ptr %788, align 8
+ %790 = fneg double %787
+ %791 = call double @llvm.fmuladd.f64(double %790, double %789, double %786)
+ %792 = getelementptr inbounds %struct.cmplx, ptr %39, i32 0, i32 1
+ store double %791, ptr %792, align 8
+ %793 = load double, ptr %17, align 8
+ %794 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 1
+ %795 = load double, ptr %794, align 8
+ %796 = load double, ptr %21, align 8
+ %797 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 1
+ %798 = load double, ptr %797, align 8
+ %799 = fmul double %796, %798
+ %800 = call double @llvm.fmuladd.f64(double %793, double %795, double %799)
+ %801 = load double, ptr %23, align 8
+ %802 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 1
+ %803 = load double, ptr %802, align 8
+ %804 = fneg double %801
+ %805 = call double @llvm.fmuladd.f64(double %804, double %803, double %800)
+ %806 = load double, ptr %19, align 8
+ %807 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 1
+ %808 = load double, ptr %807, align 8
+ %809 = fneg double %806
+ %810 = call double @llvm.fmuladd.f64(double %809, double %808, double %805)
+ %811 = load double, ptr %15, align 8
+ %812 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 1
+ %813 = load double, ptr %812, align 8
+ %814 = fneg double %811
+ %815 = call double @llvm.fmuladd.f64(double %814, double %813, double %810)
+ %816 = fneg double %815
+ %817 = getelementptr inbounds %struct.cmplx, ptr %39, i32 0, i32 0
+ store double %816, ptr %817, align 8
+ %818 = getelementptr inbounds %struct.cmplx, ptr %38, i32 0, i32 0
+ %819 = load double, ptr %818, align 8
+ %820 = getelementptr inbounds %struct.cmplx, ptr %39, i32 0, i32 0
+ %821 = load double, ptr %820, align 8
+ %822 = fadd double %819, %821
+ %823 = load ptr, ptr %10, align 4
+ %824 = load i32, ptr %7, align 4
+ %825 = load i32, ptr %24, align 4
+ %826 = load i32, ptr %8, align 4
+ %827 = mul i32 %826, 2
+ %828 = add i32 %825, %827
+ %829 = mul i32 %824, %828
+ %830 = add i32 0, %829
+ %831 = getelementptr inbounds %struct.cmplx, ptr %823, i32 %830
+ %832 = getelementptr inbounds %struct.cmplx, ptr %831, i32 0, i32 0
+ store double %822, ptr %832, align 8
+ %833 = getelementptr inbounds %struct.cmplx, ptr %38, i32 0, i32 1
+ %834 = load double, ptr %833, align 8
+ %835 = getelementptr inbounds %struct.cmplx, ptr %39, i32 0, i32 1
+ %836 = load double, ptr %835, align 8
+ %837 = fadd double %834, %836
+ %838 = load ptr, ptr %10, align 4
+ %839 = load i32, ptr %7, align 4
+ %840 = load i32, ptr %24, align 4
+ %841 = load i32, ptr %8, align 4
+ %842 = mul i32 %841, 2
+ %843 = add i32 %840, %842
+ %844 = mul i32 %839, %843
+ %845 = add i32 0, %844
+ %846 = getelementptr inbounds %struct.cmplx, ptr %838, i32 %845
+ %847 = getelementptr inbounds %struct.cmplx, ptr %846, i32 0, i32 1
+ store double %837, ptr %847, align 8
+ %848 = getelementptr inbounds %struct.cmplx, ptr %38, i32 0, i32 0
+ %849 = load double, ptr %848, align 8
+ %850 = getelementptr inbounds %struct.cmplx, ptr %39, i32 0, i32 0
+ %851 = load double, ptr %850, align 8
+ %852 = fsub double %849, %851
+ %853 = load ptr, ptr %10, align 4
+ %854 = load i32, ptr %7, align 4
+ %855 = load i32, ptr %24, align 4
+ %856 = load i32, ptr %8, align 4
+ %857 = mul i32 %856, 9
+ %858 = add i32 %855, %857
+ %859 = mul i32 %854, %858
+ %860 = add i32 0, %859
+ %861 = getelementptr inbounds %struct.cmplx, ptr %853, i32 %860
+ %862 = getelementptr inbounds %struct.cmplx, ptr %861, i32 0, i32 0
+ store double %852, ptr %862, align 8
+ %863 = getelementptr inbounds %struct.cmplx, ptr %38, i32 0, i32 1
+ %864 = load double, ptr %863, align 8
+ %865 = getelementptr inbounds %struct.cmplx, ptr %39, i32 0, i32 1
+ %866 = load double, ptr %865, align 8
+ %867 = fsub double %864, %866
+ %868 = load ptr, ptr %10, align 4
+ %869 = load i32, ptr %7, align 4
+ %870 = load i32, ptr %24, align 4
+ %871 = load i32, ptr %8, align 4
+ %872 = mul i32 %871, 9
+ %873 = add i32 %870, %872
+ %874 = mul i32 %869, %873
+ %875 = add i32 0, %874
+ %876 = getelementptr inbounds %struct.cmplx, ptr %868, i32 %875
+ %877 = getelementptr inbounds %struct.cmplx, ptr %876, i32 0, i32 1
+ store double %867, ptr %877, align 8
+ %878 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 0
+ %879 = load double, ptr %878, align 8
+ %880 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 0
+ %881 = load double, ptr %880, align 8
+ %882 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %881, double %879)
+ %883 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 0
+ %884 = load double, ptr %883, align 8
+ %885 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %884, double %882)
+ %886 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 0
+ %887 = load double, ptr %886, align 8
+ %888 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %887, double %885)
+ %889 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 0
+ %890 = load double, ptr %889, align 8
+ %891 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %890, double %888)
+ %892 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 0
+ %893 = load double,
ptr %892, align 8 + %894 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %893, double %891) + %895 = getelementptr inbounds %struct.cmplx, ptr %40, i32 0, i32 0 + store double %894, ptr %895, align 8 + %896 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 1 + %897 = load double, ptr %896, align 8 + %898 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 1 + %899 = load double, ptr %898, align 8 + %900 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %899, double %897) + %901 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 1 + %902 = load double, ptr %901, align 8 + %903 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %902, double %900) + %904 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 1 + %905 = load double, ptr %904, align 8 + %906 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %905, double %903) + %907 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 1 + %908 = load double, ptr %907, align 8 + %909 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %908, double %906) + %910 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 1 + %911 = load double, ptr %910, align 8 + %912 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %911, double %909) + %913 = getelementptr inbounds %struct.cmplx, ptr %40, i32 0, i32 1 + store double %912, ptr %913, align 8 + %914 = load double, ptr %19, align 8 + %915 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 0 + %916 = load double, ptr %915, align 8 + %917 = load double, ptr %23, align 8 + %918 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 0 + %919 = load double, ptr %918, align 8 + %920 = fmul double %917, %919 + %921 = fneg double %920 + %922 = call double @llvm.fmuladd.f64(double %914, double %916, double %921) + %923 = load double, ptr %17, align 8 + %924 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 0 + %925 = load double, ptr %924, align 8 + %926 = fneg double %923 + %927 = call double @llvm.fmuladd.f64(double %926, double %925, double %922) + %928 = load double, ptr %15, align 8 + %929 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 0 + %930 = load double, ptr %929, align 8 + %931 = call double @llvm.fmuladd.f64(double %928, double %930, double %927) + %932 = load double, ptr %21, align 8 + %933 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 0 + %934 = load double, ptr %933, align 8 + %935 = call double @llvm.fmuladd.f64(double %932, double %934, double %931) + %936 = getelementptr inbounds %struct.cmplx, ptr %41, i32 0, i32 1 + store double %935, ptr %936, align 8 + %937 = load double, ptr %19, align 8 + %938 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 1 + %939 = load double, ptr %938, align 8 + %940 = load double, ptr %23, align 8 + %941 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 1 + %942 = load double, ptr %941, align 8 + %943 = fmul double %940, %942 + %944 = fneg double %943 + %945 = call double @llvm.fmuladd.f64(double %937, double %939, double %944) + %946 = load double, ptr %17, align 8 + %947 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 1 + %948 = load double, ptr %947, align 8 + %949 = fneg double %946 + %950 = call double @llvm.fmuladd.f64(double %949, double %948, double %945) + %951 = load double, ptr %15, align 8 + %952 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 1 + %953 = load double, ptr %952, align 8 + %954 = call double 
@llvm.fmuladd.f64(double %951, double %953, double %950) + %955 = load double, ptr %21, align 8 + %956 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 1 + %957 = load double, ptr %956, align 8 + %958 = call double @llvm.fmuladd.f64(double %955, double %957, double %954) + %959 = fneg double %958 + %960 = getelementptr inbounds %struct.cmplx, ptr %41, i32 0, i32 0 + store double %959, ptr %960, align 8 + %961 = getelementptr inbounds %struct.cmplx, ptr %40, i32 0, i32 0 + %962 = load double, ptr %961, align 8 + %963 = getelementptr inbounds %struct.cmplx, ptr %41, i32 0, i32 0 + %964 = load double, ptr %963, align 8 + %965 = fadd double %962, %964 + %966 = load ptr, ptr %10, align 4 + %967 = load i32, ptr %7, align 4 + %968 = load i32, ptr %24, align 4 + %969 = load i32, ptr %8, align 4 + %970 = mul i32 %969, 3 + %971 = add i32 %968, %970 + %972 = mul i32 %967, %971 + %973 = add i32 0, %972 + %974 = getelementptr inbounds %struct.cmplx, ptr %966, i32 %973 + %975 = getelementptr inbounds %struct.cmplx, ptr %974, i32 0, i32 0 + store double %965, ptr %975, align 8 + %976 = getelementptr inbounds %struct.cmplx, ptr %40, i32 0, i32 1 + %977 = load double, ptr %976, align 8 + %978 = getelementptr inbounds %struct.cmplx, ptr %41, i32 0, i32 1 + %979 = load double, ptr %978, align 8 + %980 = fadd double %977, %979 + %981 = load ptr, ptr %10, align 4 + %982 = load i32, ptr %7, align 4 + %983 = load i32, ptr %24, align 4 + %984 = load i32, ptr %8, align 4 + %985 = mul i32 %984, 3 + %986 = add i32 %983, %985 + %987 = mul i32 %982, %986 + %988 = add i32 0, %987 + %989 = getelementptr inbounds %struct.cmplx, ptr %981, i32 %988 + %990 = getelementptr inbounds %struct.cmplx, ptr %989, i32 0, i32 1 + store double %980, ptr %990, align 8 + %991 = getelementptr inbounds %struct.cmplx, ptr %40, i32 0, i32 0 + %992 = load double, ptr %991, align 8 + %993 = getelementptr inbounds %struct.cmplx, ptr %41, i32 0, i32 0 + %994 = load double, ptr %993, align 8 + %995 = fsub double %992, %994 + %996 = load ptr, ptr %10, align 4 + %997 = load i32, ptr %7, align 4 + %998 = load i32, ptr %24, align 4 + %999 = load i32, ptr %8, align 4 + %1000 = mul i32 %999, 8 + %1001 = add i32 %998, %1000 + %1002 = mul i32 %997, %1001 + %1003 = add i32 0, %1002 + %1004 = getelementptr inbounds %struct.cmplx, ptr %996, i32 %1003 + %1005 = getelementptr inbounds %struct.cmplx, ptr %1004, i32 0, i32 0 + store double %995, ptr %1005, align 8 + %1006 = getelementptr inbounds %struct.cmplx, ptr %40, i32 0, i32 1 + %1007 = load double, ptr %1006, align 8 + %1008 = getelementptr inbounds %struct.cmplx, ptr %41, i32 0, i32 1 + %1009 = load double, ptr %1008, align 8 + %1010 = fsub double %1007, %1009 + %1011 = load ptr, ptr %10, align 4 + %1012 = load i32, ptr %7, align 4 + %1013 = load i32, ptr %24, align 4 + %1014 = load i32, ptr %8, align 4 + %1015 = mul i32 %1014, 8 + %1016 = add i32 %1013, %1015 + %1017 = mul i32 %1012, %1016 + %1018 = add i32 0, %1017 + %1019 = getelementptr inbounds %struct.cmplx, ptr %1011, i32 %1018 + %1020 = getelementptr inbounds %struct.cmplx, ptr %1019, i32 0, i32 1 + store double %1010, ptr %1020, align 8 + %1021 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 0 + %1022 = load double, ptr %1021, align 8 + %1023 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 0 + %1024 = load double, ptr %1023, align 8 + %1025 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %1024, double %1022) + %1026 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 0 + %1027 = load 
double, ptr %1026, align 8 + %1028 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %1027, double %1025) + %1029 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 0 + %1030 = load double, ptr %1029, align 8 + %1031 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %1030, double %1028) + %1032 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 0 + %1033 = load double, ptr %1032, align 8 + %1034 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %1033, double %1031) + %1035 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 0 + %1036 = load double, ptr %1035, align 8 + %1037 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %1036, double %1034) + %1038 = getelementptr inbounds %struct.cmplx, ptr %42, i32 0, i32 0 + store double %1037, ptr %1038, align 8 + %1039 = getelementptr inbounds %struct.cmplx, ptr %25, i32 0, i32 1 + %1040 = load double, ptr %1039, align 8 + %1041 = getelementptr inbounds %struct.cmplx, ptr %26, i32 0, i32 1 + %1042 = load double, ptr %1041, align 8 + %1043 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %1042, double %1040) + %1044 = getelementptr inbounds %struct.cmplx, ptr %27, i32 0, i32 1 + %1045 = load double, ptr %1044, align 8 + %1046 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %1045, double %1043) + %1047 = getelementptr inbounds %struct.cmplx, ptr %28, i32 0, i32 1 + %1048 = load double, ptr %1047, align 8 + %1049 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %1048, double %1046) + %1050 = getelementptr inbounds %struct.cmplx, ptr %29, i32 0, i32 1 + %1051 = load double, ptr %1050, align 8 + %1052 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %1051, double %1049) + %1053 = getelementptr inbounds %struct.cmplx, ptr %30, i32 0, i32 1 + %1054 = load double, ptr %1053, align 8 + %1055 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %1054, double %1052) + %1056 = getelementptr inbounds %struct.cmplx, ptr %42, i32 0, i32 1 + store double %1055, ptr %1056, align 8 + %1057 = load double, ptr %21, align 8 + %1058 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 0 + %1059 = load double, ptr %1058, align 8 + %1060 = load double, ptr %19, align 8 + %1061 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 0 + %1062 = load double, ptr %1061, align 8 + %1063 = fmul double %1060, %1062 + %1064 = fneg double %1063 + %1065 = call double @llvm.fmuladd.f64(double %1057, double %1059, double %1064) + %1066 = load double, ptr %15, align 8 + %1067 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 0 + %1068 = load double, ptr %1067, align 8 + %1069 = call double @llvm.fmuladd.f64(double %1066, double %1068, double %1065) + %1070 = load double, ptr %23, align 8 + %1071 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 0 + %1072 = load double, ptr %1071, align 8 + %1073 = call double @llvm.fmuladd.f64(double %1070, double %1072, double %1069) + %1074 = load double, ptr %17, align 8 + %1075 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 0 + %1076 = load double, ptr %1075, align 8 + %1077 = fneg double %1074 + %1078 = call double @llvm.fmuladd.f64(double %1077, double %1076, double %1073) + %1079 = getelementptr inbounds %struct.cmplx, ptr %43, i32 0, i32 1 + store double %1078, ptr %1079, align 8 + %1080 = load double, ptr %21, align 8 + %1081 = getelementptr inbounds %struct.cmplx, ptr %35, i32 0, i32 1 + %1082 = load double, ptr %1081, align 8 
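+; NOTE (reviewer annotation, not autogenerated): same butterfly shape as above
+; with the twiddles permuted; the fadd/fsub results below are stored to output
+; rows 4 and 7 (see the `mul i32 %..., 4` / `mul i32 %..., 7` index computations).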
+ %1083 = load double, ptr %19, align 8 + %1084 = getelementptr inbounds %struct.cmplx, ptr %34, i32 0, i32 1 + %1085 = load double, ptr %1084, align 8 + %1086 = fmul double %1083, %1085 + %1087 = fneg double %1086 + %1088 = call double @llvm.fmuladd.f64(double %1080, double %1082, double %1087) + %1089 = load double, ptr %15, align 8 + %1090 = getelementptr inbounds %struct.cmplx, ptr %33, i32 0, i32 1 + %1091 = load double, ptr %1090, align 8 + %1092 = call double @llvm.fmuladd.f64(double %1089, double %1091, double %1088) + %1093 = load double, ptr %23, align 8 + %1094 = getelementptr inbounds %struct.cmplx, ptr %32, i32 0, i32 1 + %1095 = load double, ptr %1094, align 8 + %1096 = call double @llvm.fmuladd.f64(double %1093, double %1095, double %1092) + %1097 = load double, ptr %17, align 8 + %1098 = getelementptr inbounds %struct.cmplx, ptr %31, i32 0, i32 1 + %1099 = load double, ptr %1098, align 8 + %1100 = fneg double %1097 + %1101 = call double @llvm.fmuladd.f64(double %1100, double %1099, double %1096) + %1102 = fneg double %1101 + %1103 = getelementptr inbounds %struct.cmplx, ptr %43, i32 0, i32 0 + store double %1102, ptr %1103, align 8 + %1104 = getelementptr inbounds %struct.cmplx, ptr %42, i32 0, i32 0 + %1105 = load double, ptr %1104, align 8 + %1106 = getelementptr inbounds %struct.cmplx, ptr %43, i32 0, i32 0 + %1107 = load double, ptr %1106, align 8 + %1108 = fadd double %1105, %1107 + %1109 = load ptr, ptr %10, align 4 + %1110 = load i32, ptr %7, align 4 + %1111 = load i32, ptr %24, align 4 + %1112 = load i32, ptr %8, align 4 + %1113 = mul i32 %1112, 4 + %1114 = add i32 %1111, %1113 + %1115 = mul i32 %1110, %1114 + %1116 = add i32 0, %1115 + %1117 = getelementptr inbounds %struct.cmplx, ptr %1109, i32 %1116 + %1118 = getelementptr inbounds %struct.cmplx, ptr %1117, i32 0, i32 0 + store double %1108, ptr %1118, align 8 + %1119 = getelementptr inbounds %struct.cmplx, ptr %42, i32 0, i32 1 + %1120 = load double, ptr %1119, align 8 + %1121 = getelementptr inbounds %struct.cmplx, ptr %43, i32 0, i32 1 + %1122 = load double, ptr %1121, align 8 + %1123 = fadd double %1120, %1122 + %1124 = load ptr, ptr %10, align 4 + %1125 = load i32, ptr %7, align 4 + %1126 = load i32, ptr %24, align 4 + %1127 = load i32, ptr %8, align 4 + %1128 = mul i32 %1127, 4 + %1129 = add i32 %1126, %1128 + %1130 = mul i32 %1125, %1129 + %1131 = add i32 0, %1130 + %1132 = getelementptr inbounds %struct.cmplx, ptr %1124, i32 %1131 + %1133 = getelementptr inbounds %struct.cmplx, ptr %1132, i32 0, i32 1 + store double %1123, ptr %1133, align 8 + %1134 = getelementptr inbounds %struct.cmplx, ptr %42, i32 0, i32 0 + %1135 = load double, ptr %1134, align 8 + %1136 = getelementptr inbounds %struct.cmplx, ptr %43, i32 0, i32 0 + %1137 = load double, ptr %1136, align 8 + %1138 = fsub double %1135, %1137 + %1139 = load ptr, ptr %10, align 4 + %1140 = load i32, ptr %7, align 4 + %1141 = load i32, ptr %24, align 4 + %1142 = load i32, ptr %8, align 4 + %1143 = mul i32 %1142, 7 + %1144 = add i32 %1141, %1143 + %1145 = mul i32 %1140, %1144 + %1146 = add i32 0, %1145 + %1147 = getelementptr inbounds %struct.cmplx, ptr %1139, i32 %1146 + %1148 = getelementptr inbounds %struct.cmplx, ptr %1147, i32 0, i32 0 + store double %1138, ptr %1148, align 8 + %1149 = getelementptr inbounds %struct.cmplx, ptr %42, i32 0, i32 1 + %1150 = load double, ptr %1149, align 8 + %1151 = getelementptr inbounds %struct.cmplx, ptr %43, i32 0, i32 1 + %1152 = load double, ptr %1151, align 8 + %1153 = fsub double %1150, %1152 + %1154 = load ptr, 
ptr %10, align 4
+  %1155 = load i32, ptr %7, align 4
+  %1156 = load i32, ptr %24, align 4
+  %1157 = load i32, ptr %8, align 4
+  %1158 = mul i32 %1157, 7
+  %1159 = add i32 %1156, %1158
+  %1160 = mul i32 %1155, %1159
+  %1161 = add i32 0, %1160
+  %1162 = getelementptr inbounds %struct.cmplx, ptr %1154, i32 %1161
+  %1163 = getelementptr inbounds %struct.cmplx, ptr %1162, i32 0, i32 1
+  store double %1153, ptr %1163, align 8
+  store i32 1, ptr %44, align 4
+  br label %1164
+
+1164: ; preds = %2788, %91
+  %1165 = load i32, ptr %44, align 4
+  %1166 = load i32, ptr %7, align 4
+  %1167 = icmp ult i32 %1165, %1166
+  br i1 %1167, label %1168, label %2791
+
+1168: ; preds = %1164
+  %1169 = load ptr, ptr %9, align 4
+  %1170 = load i32, ptr %44, align 4
+  %1171 = load i32, ptr %7, align 4
+  %1172 = load i32, ptr %24, align 4
+  %1173 = mul i32 11, %1172
+  %1174 = add i32 0, %1173
+  %1175 = mul i32 %1171, %1174
+  %1176 = add i32 %1170, %1175
+  %1177 = getelementptr inbounds %struct.cmplx, ptr %1169, i32 %1176
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 %45, ptr align 8 %1177, i32 16, i1 false)
+  %1178 = load ptr, ptr %9, align 4
+  %1179 = load i32, ptr %44, align 4
+  %1180 = load i32, ptr %7, align 4
+  %1181 = load i32, ptr %24, align 4
+  %1182 = mul i32 11, %1181
+  %1183 = add i32 1, %1182
+  %1184 = mul i32 %1180, %1183
+  %1185 = add i32 %1179, %1184
+  %1186 = getelementptr inbounds %struct.cmplx, ptr %1178, i32 %1185
+  %1187 = getelementptr inbounds %struct.cmplx, ptr %1186, i32 0, i32 0
+  %1188 = load double, ptr %1187, align 8
+  %1189 = load ptr, ptr %9, align 4
+  %1190 = load i32, ptr %44, align 4
+  %1191 = load i32, ptr %7, align 4
+  %1192 = load i32, ptr %24, align 4
+  %1193 = mul i32 11, %1192
+  %1194 = add i32 10, %1193
+  %1195 = mul i32 %1191, %1194
+  %1196 = add i32 %1190, %1195
+  %1197 = getelementptr inbounds %struct.cmplx, ptr %1189, i32 %1196
+  %1198 = getelementptr inbounds %struct.cmplx, ptr %1197, i32 0, i32 0
+  %1199 = load double, ptr %1198, align 8
+  %1200 = fadd double %1188, %1199
+  %1201 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 0
+  store double %1200, ptr %1201, align 8
+  %1202 = load ptr, ptr %9, align 4
+  %1203 = load i32, ptr %44, align 4
+  %1204 = load i32, ptr %7, align 4
+  %1205 = load i32, ptr %24, align 4
+  %1206 = mul i32 11, %1205
+  %1207 = add i32 1, %1206
+  %1208 = mul i32 %1204, %1207
+  %1209 = add i32 %1203, %1208
+  %1210 = getelementptr inbounds %struct.cmplx, ptr %1202, i32 %1209
+  %1211 = getelementptr inbounds %struct.cmplx, ptr %1210, i32 0, i32 1
+  %1212 = load double, ptr %1211, align 8
+  %1213 = load ptr, ptr %9, align 4
+  %1214 = load i32, ptr %44, align 4
+  %1215 = load i32, ptr %7, align 4
+  %1216 = load i32, ptr %24, align 4
+  %1217 = mul i32 11, %1216
+  %1218 = add i32 10, %1217
+  %1219 = mul i32 %1215, %1218
+  %1220 = add i32 %1214, %1219
+  %1221 = getelementptr inbounds %struct.cmplx, ptr %1213, i32 %1220
+  %1222 = getelementptr inbounds %struct.cmplx, ptr %1221, i32 0, i32 1
+  %1223 = load double, ptr %1222, align 8
+  %1224 = fadd double %1212, %1223
+  %1225 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 1
+  store double %1224, ptr %1225, align 8
+  %1226 = load ptr, ptr %9, align 4
+  %1227 = load i32, ptr %44, align 4
+  %1228 = load i32, ptr %7, align 4
+  %1229 = load i32, ptr %24, align 4
+  %1230 = mul i32 11, %1229
+  %1231 = add i32 1, %1230
+  %1232 = mul i32 %1228, %1231
+  %1233 = add i32 %1227, %1232
+  %1234 = getelementptr inbounds %struct.cmplx, ptr %1226, i32 %1233
+  %1235 = getelementptr inbounds
%struct.cmplx, ptr %1234, i32 0, i32 0 + %1236 = load double, ptr %1235, align 8 + %1237 = load ptr, ptr %9, align 4 + %1238 = load i32, ptr %44, align 4 + %1239 = load i32, ptr %7, align 4 + %1240 = load i32, ptr %24, align 4 + %1241 = mul i32 11, %1240 + %1242 = add i32 10, %1241 + %1243 = mul i32 %1239, %1242 + %1244 = add i32 %1238, %1243 + %1245 = getelementptr inbounds %struct.cmplx, ptr %1237, i32 %1244 + %1246 = getelementptr inbounds %struct.cmplx, ptr %1245, i32 0, i32 0 + %1247 = load double, ptr %1246, align 8 + %1248 = fsub double %1236, %1247 + %1249 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 0 + store double %1248, ptr %1249, align 8 + %1250 = load ptr, ptr %9, align 4 + %1251 = load i32, ptr %44, align 4 + %1252 = load i32, ptr %7, align 4 + %1253 = load i32, ptr %24, align 4 + %1254 = mul i32 11, %1253 + %1255 = add i32 1, %1254 + %1256 = mul i32 %1252, %1255 + %1257 = add i32 %1251, %1256 + %1258 = getelementptr inbounds %struct.cmplx, ptr %1250, i32 %1257 + %1259 = getelementptr inbounds %struct.cmplx, ptr %1258, i32 0, i32 1 + %1260 = load double, ptr %1259, align 8 + %1261 = load ptr, ptr %9, align 4 + %1262 = load i32, ptr %44, align 4 + %1263 = load i32, ptr %7, align 4 + %1264 = load i32, ptr %24, align 4 + %1265 = mul i32 11, %1264 + %1266 = add i32 10, %1265 + %1267 = mul i32 %1263, %1266 + %1268 = add i32 %1262, %1267 + %1269 = getelementptr inbounds %struct.cmplx, ptr %1261, i32 %1268 + %1270 = getelementptr inbounds %struct.cmplx, ptr %1269, i32 0, i32 1 + %1271 = load double, ptr %1270, align 8 + %1272 = fsub double %1260, %1271 + %1273 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 1 + store double %1272, ptr %1273, align 8 + %1274 = load ptr, ptr %9, align 4 + %1275 = load i32, ptr %44, align 4 + %1276 = load i32, ptr %7, align 4 + %1277 = load i32, ptr %24, align 4 + %1278 = mul i32 11, %1277 + %1279 = add i32 2, %1278 + %1280 = mul i32 %1276, %1279 + %1281 = add i32 %1275, %1280 + %1282 = getelementptr inbounds %struct.cmplx, ptr %1274, i32 %1281 + %1283 = getelementptr inbounds %struct.cmplx, ptr %1282, i32 0, i32 0 + %1284 = load double, ptr %1283, align 8 + %1285 = load ptr, ptr %9, align 4 + %1286 = load i32, ptr %44, align 4 + %1287 = load i32, ptr %7, align 4 + %1288 = load i32, ptr %24, align 4 + %1289 = mul i32 11, %1288 + %1290 = add i32 9, %1289 + %1291 = mul i32 %1287, %1290 + %1292 = add i32 %1286, %1291 + %1293 = getelementptr inbounds %struct.cmplx, ptr %1285, i32 %1292 + %1294 = getelementptr inbounds %struct.cmplx, ptr %1293, i32 0, i32 0 + %1295 = load double, ptr %1294, align 8 + %1296 = fadd double %1284, %1295 + %1297 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 0 + store double %1296, ptr %1297, align 8 + %1298 = load ptr, ptr %9, align 4 + %1299 = load i32, ptr %44, align 4 + %1300 = load i32, ptr %7, align 4 + %1301 = load i32, ptr %24, align 4 + %1302 = mul i32 11, %1301 + %1303 = add i32 2, %1302 + %1304 = mul i32 %1300, %1303 + %1305 = add i32 %1299, %1304 + %1306 = getelementptr inbounds %struct.cmplx, ptr %1298, i32 %1305 + %1307 = getelementptr inbounds %struct.cmplx, ptr %1306, i32 0, i32 1 + %1308 = load double, ptr %1307, align 8 + %1309 = load ptr, ptr %9, align 4 + %1310 = load i32, ptr %44, align 4 + %1311 = load i32, ptr %7, align 4 + %1312 = load i32, ptr %24, align 4 + %1313 = mul i32 11, %1312 + %1314 = add i32 9, %1313 + %1315 = mul i32 %1311, %1314 + %1316 = add i32 %1310, %1315 + %1317 = getelementptr inbounds %struct.cmplx, ptr %1309, i32 %1316 + %1318 = getelementptr 
inbounds %struct.cmplx, ptr %1317, i32 0, i32 1 + %1319 = load double, ptr %1318, align 8 + %1320 = fadd double %1308, %1319 + %1321 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 1 + store double %1320, ptr %1321, align 8 + %1322 = load ptr, ptr %9, align 4 + %1323 = load i32, ptr %44, align 4 + %1324 = load i32, ptr %7, align 4 + %1325 = load i32, ptr %24, align 4 + %1326 = mul i32 11, %1325 + %1327 = add i32 2, %1326 + %1328 = mul i32 %1324, %1327 + %1329 = add i32 %1323, %1328 + %1330 = getelementptr inbounds %struct.cmplx, ptr %1322, i32 %1329 + %1331 = getelementptr inbounds %struct.cmplx, ptr %1330, i32 0, i32 0 + %1332 = load double, ptr %1331, align 8 + %1333 = load ptr, ptr %9, align 4 + %1334 = load i32, ptr %44, align 4 + %1335 = load i32, ptr %7, align 4 + %1336 = load i32, ptr %24, align 4 + %1337 = mul i32 11, %1336 + %1338 = add i32 9, %1337 + %1339 = mul i32 %1335, %1338 + %1340 = add i32 %1334, %1339 + %1341 = getelementptr inbounds %struct.cmplx, ptr %1333, i32 %1340 + %1342 = getelementptr inbounds %struct.cmplx, ptr %1341, i32 0, i32 0 + %1343 = load double, ptr %1342, align 8 + %1344 = fsub double %1332, %1343 + %1345 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 0 + store double %1344, ptr %1345, align 8 + %1346 = load ptr, ptr %9, align 4 + %1347 = load i32, ptr %44, align 4 + %1348 = load i32, ptr %7, align 4 + %1349 = load i32, ptr %24, align 4 + %1350 = mul i32 11, %1349 + %1351 = add i32 2, %1350 + %1352 = mul i32 %1348, %1351 + %1353 = add i32 %1347, %1352 + %1354 = getelementptr inbounds %struct.cmplx, ptr %1346, i32 %1353 + %1355 = getelementptr inbounds %struct.cmplx, ptr %1354, i32 0, i32 1 + %1356 = load double, ptr %1355, align 8 + %1357 = load ptr, ptr %9, align 4 + %1358 = load i32, ptr %44, align 4 + %1359 = load i32, ptr %7, align 4 + %1360 = load i32, ptr %24, align 4 + %1361 = mul i32 11, %1360 + %1362 = add i32 9, %1361 + %1363 = mul i32 %1359, %1362 + %1364 = add i32 %1358, %1363 + %1365 = getelementptr inbounds %struct.cmplx, ptr %1357, i32 %1364 + %1366 = getelementptr inbounds %struct.cmplx, ptr %1365, i32 0, i32 1 + %1367 = load double, ptr %1366, align 8 + %1368 = fsub double %1356, %1367 + %1369 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 1 + store double %1368, ptr %1369, align 8 + %1370 = load ptr, ptr %9, align 4 + %1371 = load i32, ptr %44, align 4 + %1372 = load i32, ptr %7, align 4 + %1373 = load i32, ptr %24, align 4 + %1374 = mul i32 11, %1373 + %1375 = add i32 3, %1374 + %1376 = mul i32 %1372, %1375 + %1377 = add i32 %1371, %1376 + %1378 = getelementptr inbounds %struct.cmplx, ptr %1370, i32 %1377 + %1379 = getelementptr inbounds %struct.cmplx, ptr %1378, i32 0, i32 0 + %1380 = load double, ptr %1379, align 8 + %1381 = load ptr, ptr %9, align 4 + %1382 = load i32, ptr %44, align 4 + %1383 = load i32, ptr %7, align 4 + %1384 = load i32, ptr %24, align 4 + %1385 = mul i32 11, %1384 + %1386 = add i32 8, %1385 + %1387 = mul i32 %1383, %1386 + %1388 = add i32 %1382, %1387 + %1389 = getelementptr inbounds %struct.cmplx, ptr %1381, i32 %1388 + %1390 = getelementptr inbounds %struct.cmplx, ptr %1389, i32 0, i32 0 + %1391 = load double, ptr %1390, align 8 + %1392 = fadd double %1380, %1391 + %1393 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 0 + store double %1392, ptr %1393, align 8 + %1394 = load ptr, ptr %9, align 4 + %1395 = load i32, ptr %44, align 4 + %1396 = load i32, ptr %7, align 4 + %1397 = load i32, ptr %24, align 4 + %1398 = mul i32 11, %1397 + %1399 = add i32 3, %1398 + 
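+; NOTE (reviewer annotation, not autogenerated): inside the k-loop (block %1168)
+; each pass pairs input columns j and 11-j (1&10, 2&9, 3&8, ...) into sum and
+; difference temporaries (%46/%55, %47/%54, %48/%53, ...) before the butterflies.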
%1400 = mul i32 %1396, %1399 + %1401 = add i32 %1395, %1400 + %1402 = getelementptr inbounds %struct.cmplx, ptr %1394, i32 %1401 + %1403 = getelementptr inbounds %struct.cmplx, ptr %1402, i32 0, i32 1 + %1404 = load double, ptr %1403, align 8 + %1405 = load ptr, ptr %9, align 4 + %1406 = load i32, ptr %44, align 4 + %1407 = load i32, ptr %7, align 4 + %1408 = load i32, ptr %24, align 4 + %1409 = mul i32 11, %1408 + %1410 = add i32 8, %1409 + %1411 = mul i32 %1407, %1410 + %1412 = add i32 %1406, %1411 + %1413 = getelementptr inbounds %struct.cmplx, ptr %1405, i32 %1412 + %1414 = getelementptr inbounds %struct.cmplx, ptr %1413, i32 0, i32 1 + %1415 = load double, ptr %1414, align 8 + %1416 = fadd double %1404, %1415 + %1417 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 1 + store double %1416, ptr %1417, align 8 + %1418 = load ptr, ptr %9, align 4 + %1419 = load i32, ptr %44, align 4 + %1420 = load i32, ptr %7, align 4 + %1421 = load i32, ptr %24, align 4 + %1422 = mul i32 11, %1421 + %1423 = add i32 3, %1422 + %1424 = mul i32 %1420, %1423 + %1425 = add i32 %1419, %1424 + %1426 = getelementptr inbounds %struct.cmplx, ptr %1418, i32 %1425 + %1427 = getelementptr inbounds %struct.cmplx, ptr %1426, i32 0, i32 0 + %1428 = load double, ptr %1427, align 8 + %1429 = load ptr, ptr %9, align 4 + %1430 = load i32, ptr %44, align 4 + %1431 = load i32, ptr %7, align 4 + %1432 = load i32, ptr %24, align 4 + %1433 = mul i32 11, %1432 + %1434 = add i32 8, %1433 + %1435 = mul i32 %1431, %1434 + %1436 = add i32 %1430, %1435 + %1437 = getelementptr inbounds %struct.cmplx, ptr %1429, i32 %1436 + %1438 = getelementptr inbounds %struct.cmplx, ptr %1437, i32 0, i32 0 + %1439 = load double, ptr %1438, align 8 + %1440 = fsub double %1428, %1439 + %1441 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 0 + store double %1440, ptr %1441, align 8 + %1442 = load ptr, ptr %9, align 4 + %1443 = load i32, ptr %44, align 4 + %1444 = load i32, ptr %7, align 4 + %1445 = load i32, ptr %24, align 4 + %1446 = mul i32 11, %1445 + %1447 = add i32 3, %1446 + %1448 = mul i32 %1444, %1447 + %1449 = add i32 %1443, %1448 + %1450 = getelementptr inbounds %struct.cmplx, ptr %1442, i32 %1449 + %1451 = getelementptr inbounds %struct.cmplx, ptr %1450, i32 0, i32 1 + %1452 = load double, ptr %1451, align 8 + %1453 = load ptr, ptr %9, align 4 + %1454 = load i32, ptr %44, align 4 + %1455 = load i32, ptr %7, align 4 + %1456 = load i32, ptr %24, align 4 + %1457 = mul i32 11, %1456 + %1458 = add i32 8, %1457 + %1459 = mul i32 %1455, %1458 + %1460 = add i32 %1454, %1459 + %1461 = getelementptr inbounds %struct.cmplx, ptr %1453, i32 %1460 + %1462 = getelementptr inbounds %struct.cmplx, ptr %1461, i32 0, i32 1 + %1463 = load double, ptr %1462, align 8 + %1464 = fsub double %1452, %1463 + %1465 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 1 + store double %1464, ptr %1465, align 8 + %1466 = load ptr, ptr %9, align 4 + %1467 = load i32, ptr %44, align 4 + %1468 = load i32, ptr %7, align 4 + %1469 = load i32, ptr %24, align 4 + %1470 = mul i32 11, %1469 + %1471 = add i32 4, %1470 + %1472 = mul i32 %1468, %1471 + %1473 = add i32 %1467, %1472 + %1474 = getelementptr inbounds %struct.cmplx, ptr %1466, i32 %1473 + %1475 = getelementptr inbounds %struct.cmplx, ptr %1474, i32 0, i32 0 + %1476 = load double, ptr %1475, align 8 + %1477 = load ptr, ptr %9, align 4 + %1478 = load i32, ptr %44, align 4 + %1479 = load i32, ptr %7, align 4 + %1480 = load i32, ptr %24, align 4 + %1481 = mul i32 11, %1480 + %1482 = add i32 7, 
%1481 + %1483 = mul i32 %1479, %1482 + %1484 = add i32 %1478, %1483 + %1485 = getelementptr inbounds %struct.cmplx, ptr %1477, i32 %1484 + %1486 = getelementptr inbounds %struct.cmplx, ptr %1485, i32 0, i32 0 + %1487 = load double, ptr %1486, align 8 + %1488 = fadd double %1476, %1487 + %1489 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 0 + store double %1488, ptr %1489, align 8 + %1490 = load ptr, ptr %9, align 4 + %1491 = load i32, ptr %44, align 4 + %1492 = load i32, ptr %7, align 4 + %1493 = load i32, ptr %24, align 4 + %1494 = mul i32 11, %1493 + %1495 = add i32 4, %1494 + %1496 = mul i32 %1492, %1495 + %1497 = add i32 %1491, %1496 + %1498 = getelementptr inbounds %struct.cmplx, ptr %1490, i32 %1497 + %1499 = getelementptr inbounds %struct.cmplx, ptr %1498, i32 0, i32 1 + %1500 = load double, ptr %1499, align 8 + %1501 = load ptr, ptr %9, align 4 + %1502 = load i32, ptr %44, align 4 + %1503 = load i32, ptr %7, align 4 + %1504 = load i32, ptr %24, align 4 + %1505 = mul i32 11, %1504 + %1506 = add i32 7, %1505 + %1507 = mul i32 %1503, %1506 + %1508 = add i32 %1502, %1507 + %1509 = getelementptr inbounds %struct.cmplx, ptr %1501, i32 %1508 + %1510 = getelementptr inbounds %struct.cmplx, ptr %1509, i32 0, i32 1 + %1511 = load double, ptr %1510, align 8 + %1512 = fadd double %1500, %1511 + %1513 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 1 + store double %1512, ptr %1513, align 8 + %1514 = load ptr, ptr %9, align 4 + %1515 = load i32, ptr %44, align 4 + %1516 = load i32, ptr %7, align 4 + %1517 = load i32, ptr %24, align 4 + %1518 = mul i32 11, %1517 + %1519 = add i32 4, %1518 + %1520 = mul i32 %1516, %1519 + %1521 = add i32 %1515, %1520 + %1522 = getelementptr inbounds %struct.cmplx, ptr %1514, i32 %1521 + %1523 = getelementptr inbounds %struct.cmplx, ptr %1522, i32 0, i32 0 + %1524 = load double, ptr %1523, align 8 + %1525 = load ptr, ptr %9, align 4 + %1526 = load i32, ptr %44, align 4 + %1527 = load i32, ptr %7, align 4 + %1528 = load i32, ptr %24, align 4 + %1529 = mul i32 11, %1528 + %1530 = add i32 7, %1529 + %1531 = mul i32 %1527, %1530 + %1532 = add i32 %1526, %1531 + %1533 = getelementptr inbounds %struct.cmplx, ptr %1525, i32 %1532 + %1534 = getelementptr inbounds %struct.cmplx, ptr %1533, i32 0, i32 0 + %1535 = load double, ptr %1534, align 8 + %1536 = fsub double %1524, %1535 + %1537 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 0 + store double %1536, ptr %1537, align 8 + %1538 = load ptr, ptr %9, align 4 + %1539 = load i32, ptr %44, align 4 + %1540 = load i32, ptr %7, align 4 + %1541 = load i32, ptr %24, align 4 + %1542 = mul i32 11, %1541 + %1543 = add i32 4, %1542 + %1544 = mul i32 %1540, %1543 + %1545 = add i32 %1539, %1544 + %1546 = getelementptr inbounds %struct.cmplx, ptr %1538, i32 %1545 + %1547 = getelementptr inbounds %struct.cmplx, ptr %1546, i32 0, i32 1 + %1548 = load double, ptr %1547, align 8 + %1549 = load ptr, ptr %9, align 4 + %1550 = load i32, ptr %44, align 4 + %1551 = load i32, ptr %7, align 4 + %1552 = load i32, ptr %24, align 4 + %1553 = mul i32 11, %1552 + %1554 = add i32 7, %1553 + %1555 = mul i32 %1551, %1554 + %1556 = add i32 %1550, %1555 + %1557 = getelementptr inbounds %struct.cmplx, ptr %1549, i32 %1556 + %1558 = getelementptr inbounds %struct.cmplx, ptr %1557, i32 0, i32 1 + %1559 = load double, ptr %1558, align 8 + %1560 = fsub double %1548, %1559 + %1561 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 1 + store double %1560, ptr %1561, align 8 + %1562 = load ptr, ptr %9, align 4 + %1563 
= load i32, ptr %44, align 4 + %1564 = load i32, ptr %7, align 4 + %1565 = load i32, ptr %24, align 4 + %1566 = mul i32 11, %1565 + %1567 = add i32 5, %1566 + %1568 = mul i32 %1564, %1567 + %1569 = add i32 %1563, %1568 + %1570 = getelementptr inbounds %struct.cmplx, ptr %1562, i32 %1569 + %1571 = getelementptr inbounds %struct.cmplx, ptr %1570, i32 0, i32 0 + %1572 = load double, ptr %1571, align 8 + %1573 = load ptr, ptr %9, align 4 + %1574 = load i32, ptr %44, align 4 + %1575 = load i32, ptr %7, align 4 + %1576 = load i32, ptr %24, align 4 + %1577 = mul i32 11, %1576 + %1578 = add i32 6, %1577 + %1579 = mul i32 %1575, %1578 + %1580 = add i32 %1574, %1579 + %1581 = getelementptr inbounds %struct.cmplx, ptr %1573, i32 %1580 + %1582 = getelementptr inbounds %struct.cmplx, ptr %1581, i32 0, i32 0 + %1583 = load double, ptr %1582, align 8 + %1584 = fadd double %1572, %1583 + %1585 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 0 + store double %1584, ptr %1585, align 8 + %1586 = load ptr, ptr %9, align 4 + %1587 = load i32, ptr %44, align 4 + %1588 = load i32, ptr %7, align 4 + %1589 = load i32, ptr %24, align 4 + %1590 = mul i32 11, %1589 + %1591 = add i32 5, %1590 + %1592 = mul i32 %1588, %1591 + %1593 = add i32 %1587, %1592 + %1594 = getelementptr inbounds %struct.cmplx, ptr %1586, i32 %1593 + %1595 = getelementptr inbounds %struct.cmplx, ptr %1594, i32 0, i32 1 + %1596 = load double, ptr %1595, align 8 + %1597 = load ptr, ptr %9, align 4 + %1598 = load i32, ptr %44, align 4 + %1599 = load i32, ptr %7, align 4 + %1600 = load i32, ptr %24, align 4 + %1601 = mul i32 11, %1600 + %1602 = add i32 6, %1601 + %1603 = mul i32 %1599, %1602 + %1604 = add i32 %1598, %1603 + %1605 = getelementptr inbounds %struct.cmplx, ptr %1597, i32 %1604 + %1606 = getelementptr inbounds %struct.cmplx, ptr %1605, i32 0, i32 1 + %1607 = load double, ptr %1606, align 8 + %1608 = fadd double %1596, %1607 + %1609 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 1 + store double %1608, ptr %1609, align 8 + %1610 = load ptr, ptr %9, align 4 + %1611 = load i32, ptr %44, align 4 + %1612 = load i32, ptr %7, align 4 + %1613 = load i32, ptr %24, align 4 + %1614 = mul i32 11, %1613 + %1615 = add i32 5, %1614 + %1616 = mul i32 %1612, %1615 + %1617 = add i32 %1611, %1616 + %1618 = getelementptr inbounds %struct.cmplx, ptr %1610, i32 %1617 + %1619 = getelementptr inbounds %struct.cmplx, ptr %1618, i32 0, i32 0 + %1620 = load double, ptr %1619, align 8 + %1621 = load ptr, ptr %9, align 4 + %1622 = load i32, ptr %44, align 4 + %1623 = load i32, ptr %7, align 4 + %1624 = load i32, ptr %24, align 4 + %1625 = mul i32 11, %1624 + %1626 = add i32 6, %1625 + %1627 = mul i32 %1623, %1626 + %1628 = add i32 %1622, %1627 + %1629 = getelementptr inbounds %struct.cmplx, ptr %1621, i32 %1628 + %1630 = getelementptr inbounds %struct.cmplx, ptr %1629, i32 0, i32 0 + %1631 = load double, ptr %1630, align 8 + %1632 = fsub double %1620, %1631 + %1633 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 0 + store double %1632, ptr %1633, align 8 + %1634 = load ptr, ptr %9, align 4 + %1635 = load i32, ptr %44, align 4 + %1636 = load i32, ptr %7, align 4 + %1637 = load i32, ptr %24, align 4 + %1638 = mul i32 11, %1637 + %1639 = add i32 5, %1638 + %1640 = mul i32 %1636, %1639 + %1641 = add i32 %1635, %1640 + %1642 = getelementptr inbounds %struct.cmplx, ptr %1634, i32 %1641 + %1643 = getelementptr inbounds %struct.cmplx, ptr %1642, i32 0, i32 1 + %1644 = load double, ptr %1643, align 8 + %1645 = load ptr, ptr %9, align 4 + 
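+; NOTE (reviewer annotation, not autogenerated): columns 5 and 6 form the last
+; symmetric pair (%50/%51); the plain fadd chains that follow accumulate the
+; partial sums into output row 0 (the `mul i32 %..., 0` index computation).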
%1646 = load i32, ptr %44, align 4 + %1647 = load i32, ptr %7, align 4 + %1648 = load i32, ptr %24, align 4 + %1649 = mul i32 11, %1648 + %1650 = add i32 6, %1649 + %1651 = mul i32 %1647, %1650 + %1652 = add i32 %1646, %1651 + %1653 = getelementptr inbounds %struct.cmplx, ptr %1645, i32 %1652 + %1654 = getelementptr inbounds %struct.cmplx, ptr %1653, i32 0, i32 1 + %1655 = load double, ptr %1654, align 8 + %1656 = fsub double %1644, %1655 + %1657 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 1 + store double %1656, ptr %1657, align 8 + %1658 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 0 + %1659 = load double, ptr %1658, align 8 + %1660 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 0 + %1661 = load double, ptr %1660, align 8 + %1662 = fadd double %1659, %1661 + %1663 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 0 + %1664 = load double, ptr %1663, align 8 + %1665 = fadd double %1662, %1664 + %1666 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 0 + %1667 = load double, ptr %1666, align 8 + %1668 = fadd double %1665, %1667 + %1669 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 0 + %1670 = load double, ptr %1669, align 8 + %1671 = fadd double %1668, %1670 + %1672 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 0 + %1673 = load double, ptr %1672, align 8 + %1674 = fadd double %1671, %1673 + %1675 = load ptr, ptr %10, align 4 + %1676 = load i32, ptr %44, align 4 + %1677 = load i32, ptr %7, align 4 + %1678 = load i32, ptr %24, align 4 + %1679 = load i32, ptr %8, align 4 + %1680 = mul i32 %1679, 0 + %1681 = add i32 %1678, %1680 + %1682 = mul i32 %1677, %1681 + %1683 = add i32 %1676, %1682 + %1684 = getelementptr inbounds %struct.cmplx, ptr %1675, i32 %1683 + %1685 = getelementptr inbounds %struct.cmplx, ptr %1684, i32 0, i32 0 + store double %1674, ptr %1685, align 8 + %1686 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 1 + %1687 = load double, ptr %1686, align 8 + %1688 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 1 + %1689 = load double, ptr %1688, align 8 + %1690 = fadd double %1687, %1689 + %1691 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 1 + %1692 = load double, ptr %1691, align 8 + %1693 = fadd double %1690, %1692 + %1694 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 1 + %1695 = load double, ptr %1694, align 8 + %1696 = fadd double %1693, %1695 + %1697 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 1 + %1698 = load double, ptr %1697, align 8 + %1699 = fadd double %1696, %1698 + %1700 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 1 + %1701 = load double, ptr %1700, align 8 + %1702 = fadd double %1699, %1701 + %1703 = load ptr, ptr %10, align 4 + %1704 = load i32, ptr %44, align 4 + %1705 = load i32, ptr %7, align 4 + %1706 = load i32, ptr %24, align 4 + %1707 = load i32, ptr %8, align 4 + %1708 = mul i32 %1707, 0 + %1709 = add i32 %1706, %1708 + %1710 = mul i32 %1705, %1709 + %1711 = add i32 %1704, %1710 + %1712 = getelementptr inbounds %struct.cmplx, ptr %1703, i32 %1711 + %1713 = getelementptr inbounds %struct.cmplx, ptr %1712, i32 0, i32 1 + store double %1702, ptr %1713, align 8 + %1714 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 0 + %1715 = load double, ptr %1714, align 8 + %1716 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 0 + %1717 = load double, ptr %1716, align 8 + %1718 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %1717, double %1715) + %1719 = 
getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 0 + %1720 = load double, ptr %1719, align 8 + %1721 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %1720, double %1718) + %1722 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 0 + %1723 = load double, ptr %1722, align 8 + %1724 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %1723, double %1721) + %1725 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 0 + %1726 = load double, ptr %1725, align 8 + %1727 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %1726, double %1724) + %1728 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 0 + %1729 = load double, ptr %1728, align 8 + %1730 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %1729, double %1727) + %1731 = getelementptr inbounds %struct.cmplx, ptr %58, i32 0, i32 0 + store double %1730, ptr %1731, align 8 + %1732 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 1 + %1733 = load double, ptr %1732, align 8 + %1734 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 1 + %1735 = load double, ptr %1734, align 8 + %1736 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %1735, double %1733) + %1737 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 1 + %1738 = load double, ptr %1737, align 8 + %1739 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %1738, double %1736) + %1740 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 1 + %1741 = load double, ptr %1740, align 8 + %1742 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %1741, double %1739) + %1743 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 1 + %1744 = load double, ptr %1743, align 8 + %1745 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %1744, double %1742) + %1746 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 1 + %1747 = load double, ptr %1746, align 8 + %1748 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %1747, double %1745) + %1749 = getelementptr inbounds %struct.cmplx, ptr %58, i32 0, i32 1 + store double %1748, ptr %1749, align 8 + %1750 = load double, ptr %15, align 8 + %1751 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 0 + %1752 = load double, ptr %1751, align 8 + %1753 = load double, ptr %17, align 8 + %1754 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 0 + %1755 = load double, ptr %1754, align 8 + %1756 = fmul double %1753, %1755 + %1757 = call double @llvm.fmuladd.f64(double %1750, double %1752, double %1756) + %1758 = load double, ptr %19, align 8 + %1759 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 0 + %1760 = load double, ptr %1759, align 8 + %1761 = call double @llvm.fmuladd.f64(double %1758, double %1760, double %1757) + %1762 = load double, ptr %21, align 8 + %1763 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 0 + %1764 = load double, ptr %1763, align 8 + %1765 = call double @llvm.fmuladd.f64(double %1762, double %1764, double %1761) + %1766 = load double, ptr %23, align 8 + %1767 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 0 + %1768 = load double, ptr %1767, align 8 + %1769 = call double @llvm.fmuladd.f64(double %1766, double %1768, double %1765) + %1770 = getelementptr inbounds %struct.cmplx, ptr %59, i32 0, i32 1 + store double %1769, ptr %1770, align 8 + %1771 = load double, ptr %15, align 8 + %1772 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 1 + %1773 = load double, 
ptr %1772, align 8 + %1774 = load double, ptr %17, align 8 + %1775 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 1 + %1776 = load double, ptr %1775, align 8 + %1777 = fmul double %1774, %1776 + %1778 = call double @llvm.fmuladd.f64(double %1771, double %1773, double %1777) + %1779 = load double, ptr %19, align 8 + %1780 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 1 + %1781 = load double, ptr %1780, align 8 + %1782 = call double @llvm.fmuladd.f64(double %1779, double %1781, double %1778) + %1783 = load double, ptr %21, align 8 + %1784 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 1 + %1785 = load double, ptr %1784, align 8 + %1786 = call double @llvm.fmuladd.f64(double %1783, double %1785, double %1782) + %1787 = load double, ptr %23, align 8 + %1788 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 1 + %1789 = load double, ptr %1788, align 8 + %1790 = call double @llvm.fmuladd.f64(double %1787, double %1789, double %1786) + %1791 = fneg double %1790 + %1792 = getelementptr inbounds %struct.cmplx, ptr %59, i32 0, i32 0 + store double %1791, ptr %1792, align 8 + %1793 = getelementptr inbounds %struct.cmplx, ptr %58, i32 0, i32 0 + %1794 = load double, ptr %1793, align 8 + %1795 = getelementptr inbounds %struct.cmplx, ptr %59, i32 0, i32 0 + %1796 = load double, ptr %1795, align 8 + %1797 = fadd double %1794, %1796 + %1798 = getelementptr inbounds %struct.cmplx, ptr %56, i32 0, i32 0 + store double %1797, ptr %1798, align 8 + %1799 = getelementptr inbounds %struct.cmplx, ptr %58, i32 0, i32 1 + %1800 = load double, ptr %1799, align 8 + %1801 = getelementptr inbounds %struct.cmplx, ptr %59, i32 0, i32 1 + %1802 = load double, ptr %1801, align 8 + %1803 = fadd double %1800, %1802 + %1804 = getelementptr inbounds %struct.cmplx, ptr %56, i32 0, i32 1 + store double %1803, ptr %1804, align 8 + %1805 = getelementptr inbounds %struct.cmplx, ptr %58, i32 0, i32 0 + %1806 = load double, ptr %1805, align 8 + %1807 = getelementptr inbounds %struct.cmplx, ptr %59, i32 0, i32 0 + %1808 = load double, ptr %1807, align 8 + %1809 = fsub double %1806, %1808 + %1810 = getelementptr inbounds %struct.cmplx, ptr %57, i32 0, i32 0 + store double %1809, ptr %1810, align 8 + %1811 = getelementptr inbounds %struct.cmplx, ptr %58, i32 0, i32 1 + %1812 = load double, ptr %1811, align 8 + %1813 = getelementptr inbounds %struct.cmplx, ptr %59, i32 0, i32 1 + %1814 = load double, ptr %1813, align 8 + %1815 = fsub double %1812, %1814 + %1816 = getelementptr inbounds %struct.cmplx, ptr %57, i32 0, i32 1 + store double %1815, ptr %1816, align 8 + %1817 = load ptr, ptr %11, align 4 + %1818 = load i32, ptr %44, align 4 + %1819 = sub i32 %1818, 1 + %1820 = load i32, ptr %7, align 4 + %1821 = sub i32 %1820, 1 + %1822 = mul i32 0, %1821 + %1823 = add i32 %1819, %1822 + %1824 = getelementptr inbounds %struct.cmplx, ptr %1817, i32 %1823 + %1825 = getelementptr inbounds %struct.cmplx, ptr %1824, i32 0, i32 0 + %1826 = load double, ptr %1825, align 8 + %1827 = getelementptr inbounds %struct.cmplx, ptr %56, i32 0, i32 0 + %1828 = load double, ptr %1827, align 8 + %1829 = load i32, ptr %12, align 4 + %1830 = sitofp i32 %1829 to double + %1831 = load ptr, ptr %11, align 4 + %1832 = load i32, ptr %44, align 4 + %1833 = sub i32 %1832, 1 + %1834 = load i32, ptr %7, align 4 + %1835 = sub i32 %1834, 1 + %1836 = mul i32 0, %1835 + %1837 = add i32 %1833, %1836 + %1838 = getelementptr inbounds %struct.cmplx, ptr %1831, i32 %1837 + %1839 = getelementptr inbounds %struct.cmplx, ptr %1838, i32 
0, i32 1 + %1840 = load double, ptr %1839, align 8 + %1841 = fmul double %1830, %1840 + %1842 = getelementptr inbounds %struct.cmplx, ptr %56, i32 0, i32 1 + %1843 = load double, ptr %1842, align 8 + %1844 = fmul double %1841, %1843 + %1845 = fneg double %1844 + %1846 = call double @llvm.fmuladd.f64(double %1826, double %1828, double %1845) + %1847 = load ptr, ptr %10, align 4 + %1848 = load i32, ptr %44, align 4 + %1849 = load i32, ptr %7, align 4 + %1850 = load i32, ptr %24, align 4 + %1851 = load i32, ptr %8, align 4 + %1852 = mul i32 %1851, 1 + %1853 = add i32 %1850, %1852 + %1854 = mul i32 %1849, %1853 + %1855 = add i32 %1848, %1854 + %1856 = getelementptr inbounds %struct.cmplx, ptr %1847, i32 %1855 + %1857 = getelementptr inbounds %struct.cmplx, ptr %1856, i32 0, i32 0 + store double %1846, ptr %1857, align 8 + %1858 = load ptr, ptr %11, align 4 + %1859 = load i32, ptr %44, align 4 + %1860 = sub i32 %1859, 1 + %1861 = load i32, ptr %7, align 4 + %1862 = sub i32 %1861, 1 + %1863 = mul i32 0, %1862 + %1864 = add i32 %1860, %1863 + %1865 = getelementptr inbounds %struct.cmplx, ptr %1858, i32 %1864 + %1866 = getelementptr inbounds %struct.cmplx, ptr %1865, i32 0, i32 0 + %1867 = load double, ptr %1866, align 8 + %1868 = getelementptr inbounds %struct.cmplx, ptr %56, i32 0, i32 1 + %1869 = load double, ptr %1868, align 8 + %1870 = load i32, ptr %12, align 4 + %1871 = sitofp i32 %1870 to double + %1872 = load ptr, ptr %11, align 4 + %1873 = load i32, ptr %44, align 4 + %1874 = sub i32 %1873, 1 + %1875 = load i32, ptr %7, align 4 + %1876 = sub i32 %1875, 1 + %1877 = mul i32 0, %1876 + %1878 = add i32 %1874, %1877 + %1879 = getelementptr inbounds %struct.cmplx, ptr %1872, i32 %1878 + %1880 = getelementptr inbounds %struct.cmplx, ptr %1879, i32 0, i32 1 + %1881 = load double, ptr %1880, align 8 + %1882 = fmul double %1871, %1881 + %1883 = getelementptr inbounds %struct.cmplx, ptr %56, i32 0, i32 0 + %1884 = load double, ptr %1883, align 8 + %1885 = fmul double %1882, %1884 + %1886 = call double @llvm.fmuladd.f64(double %1867, double %1869, double %1885) + %1887 = load ptr, ptr %10, align 4 + %1888 = load i32, ptr %44, align 4 + %1889 = load i32, ptr %7, align 4 + %1890 = load i32, ptr %24, align 4 + %1891 = load i32, ptr %8, align 4 + %1892 = mul i32 %1891, 1 + %1893 = add i32 %1890, %1892 + %1894 = mul i32 %1889, %1893 + %1895 = add i32 %1888, %1894 + %1896 = getelementptr inbounds %struct.cmplx, ptr %1887, i32 %1895 + %1897 = getelementptr inbounds %struct.cmplx, ptr %1896, i32 0, i32 1 + store double %1886, ptr %1897, align 8 + %1898 = load ptr, ptr %11, align 4 + %1899 = load i32, ptr %44, align 4 + %1900 = sub i32 %1899, 1 + %1901 = load i32, ptr %7, align 4 + %1902 = sub i32 %1901, 1 + %1903 = mul i32 9, %1902 + %1904 = add i32 %1900, %1903 + %1905 = getelementptr inbounds %struct.cmplx, ptr %1898, i32 %1904 + %1906 = getelementptr inbounds %struct.cmplx, ptr %1905, i32 0, i32 0 + %1907 = load double, ptr %1906, align 8 + %1908 = getelementptr inbounds %struct.cmplx, ptr %57, i32 0, i32 0 + %1909 = load double, ptr %1908, align 8 + %1910 = load i32, ptr %12, align 4 + %1911 = sitofp i32 %1910 to double + %1912 = load ptr, ptr %11, align 4 + %1913 = load i32, ptr %44, align 4 + %1914 = sub i32 %1913, 1 + %1915 = load i32, ptr %7, align 4 + %1916 = sub i32 %1915, 1 + %1917 = mul i32 9, %1916 + %1918 = add i32 %1914, %1917 + %1919 = getelementptr inbounds %struct.cmplx, ptr %1912, i32 %1918 + %1920 = getelementptr inbounds %struct.cmplx, ptr %1919, i32 0, i32 1 + %1921 = load double, ptr 
%1920, align 8 + %1922 = fmul double %1911, %1921 + %1923 = getelementptr inbounds %struct.cmplx, ptr %57, i32 0, i32 1 + %1924 = load double, ptr %1923, align 8 + %1925 = fmul double %1922, %1924 + %1926 = fneg double %1925 + %1927 = call double @llvm.fmuladd.f64(double %1907, double %1909, double %1926) + %1928 = load ptr, ptr %10, align 4 + %1929 = load i32, ptr %44, align 4 + %1930 = load i32, ptr %7, align 4 + %1931 = load i32, ptr %24, align 4 + %1932 = load i32, ptr %8, align 4 + %1933 = mul i32 %1932, 10 + %1934 = add i32 %1931, %1933 + %1935 = mul i32 %1930, %1934 + %1936 = add i32 %1929, %1935 + %1937 = getelementptr inbounds %struct.cmplx, ptr %1928, i32 %1936 + %1938 = getelementptr inbounds %struct.cmplx, ptr %1937, i32 0, i32 0 + store double %1927, ptr %1938, align 8 + %1939 = load ptr, ptr %11, align 4 + %1940 = load i32, ptr %44, align 4 + %1941 = sub i32 %1940, 1 + %1942 = load i32, ptr %7, align 4 + %1943 = sub i32 %1942, 1 + %1944 = mul i32 9, %1943 + %1945 = add i32 %1941, %1944 + %1946 = getelementptr inbounds %struct.cmplx, ptr %1939, i32 %1945 + %1947 = getelementptr inbounds %struct.cmplx, ptr %1946, i32 0, i32 0 + %1948 = load double, ptr %1947, align 8 + %1949 = getelementptr inbounds %struct.cmplx, ptr %57, i32 0, i32 1 + %1950 = load double, ptr %1949, align 8 + %1951 = load i32, ptr %12, align 4 + %1952 = sitofp i32 %1951 to double + %1953 = load ptr, ptr %11, align 4 + %1954 = load i32, ptr %44, align 4 + %1955 = sub i32 %1954, 1 + %1956 = load i32, ptr %7, align 4 + %1957 = sub i32 %1956, 1 + %1958 = mul i32 9, %1957 + %1959 = add i32 %1955, %1958 + %1960 = getelementptr inbounds %struct.cmplx, ptr %1953, i32 %1959 + %1961 = getelementptr inbounds %struct.cmplx, ptr %1960, i32 0, i32 1 + %1962 = load double, ptr %1961, align 8 + %1963 = fmul double %1952, %1962 + %1964 = getelementptr inbounds %struct.cmplx, ptr %57, i32 0, i32 0 + %1965 = load double, ptr %1964, align 8 + %1966 = fmul double %1963, %1965 + %1967 = call double @llvm.fmuladd.f64(double %1948, double %1950, double %1966) + %1968 = load ptr, ptr %10, align 4 + %1969 = load i32, ptr %44, align 4 + %1970 = load i32, ptr %7, align 4 + %1971 = load i32, ptr %24, align 4 + %1972 = load i32, ptr %8, align 4 + %1973 = mul i32 %1972, 10 + %1974 = add i32 %1971, %1973 + %1975 = mul i32 %1970, %1974 + %1976 = add i32 %1969, %1975 + %1977 = getelementptr inbounds %struct.cmplx, ptr %1968, i32 %1976 + %1978 = getelementptr inbounds %struct.cmplx, ptr %1977, i32 0, i32 1 + store double %1967, ptr %1978, align 8 + %1979 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 0 + %1980 = load double, ptr %1979, align 8 + %1981 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 0 + %1982 = load double, ptr %1981, align 8 + %1983 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %1982, double %1980) + %1984 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 0 + %1985 = load double, ptr %1984, align 8 + %1986 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %1985, double %1983) + %1987 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 0 + %1988 = load double, ptr %1987, align 8 + %1989 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %1988, double %1986) + %1990 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 0 + %1991 = load double, ptr %1990, align 8 + %1992 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %1991, double %1989) + %1993 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 0 + 
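+; NOTE (reviewer annotation, not autogenerated): in the loop body each butterfly
+; result also appears to be multiplied by a per-column twiddle loaded from the
+; array at ptr %11, with the imaginary part scaled by the sign value loaded
+; from %12 (sitofp to double).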
%1994 = load double, ptr %1993, align 8
+  %1995 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %1994, double %1992)
+  %1996 = getelementptr inbounds %struct.cmplx, ptr %62, i32 0, i32 0
+  store double %1995, ptr %1996, align 8
+  %1997 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 1
+  %1998 = load double, ptr %1997, align 8
+  %1999 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 1
+  %2000 = load double, ptr %1999, align 8
+  %2001 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %2000, double %1998)
+  %2002 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 1
+  %2003 = load double, ptr %2002, align 8
+  %2004 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %2003, double %2001)
+  %2005 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 1
+  %2006 = load double, ptr %2005, align 8
+  %2007 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %2006, double %2004)
+  %2008 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 1
+  %2009 = load double, ptr %2008, align 8
+  %2010 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %2009, double %2007)
+  %2011 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 1
+  %2012 = load double, ptr %2011, align 8
+  %2013 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %2012, double %2010)
+  %2014 = getelementptr inbounds %struct.cmplx, ptr %62, i32 0, i32 1
+  store double %2013, ptr %2014, align 8
+  %2015 = load double, ptr %17, align 8
+  %2016 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 0
+  %2017 = load double, ptr %2016, align 8
+  %2018 = load double, ptr %21, align 8
+  %2019 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 0
+  %2020 = load double, ptr %2019, align 8
+  %2021 = fmul double %2018, %2020
+  %2022 = call double @llvm.fmuladd.f64(double %2015, double %2017, double %2021)
+  %2023 = load double, ptr %23, align 8
+  %2024 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 0
+  %2025 = load double, ptr %2024, align 8
+  %2026 = fneg double %2023
+  %2027 = call double @llvm.fmuladd.f64(double %2026, double %2025, double %2022)
+  %2028 = load double, ptr %19, align 8
+  %2029 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 0
+  %2030 = load double, ptr %2029, align 8
+  %2031 = fneg double %2028
+  %2032 = call double @llvm.fmuladd.f64(double %2031, double %2030, double %2027)
+  %2033 = load double, ptr %15, align 8
+  %2034 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 0
+  %2035 = load double, ptr %2034, align 8
+  %2036 = fneg double %2033
+  %2037 = call double @llvm.fmuladd.f64(double %2036, double %2035, double %2032)
+  %2038 = getelementptr inbounds %struct.cmplx, ptr %63, i32 0, i32 1
+  store double %2037, ptr %2038, align 8
+  %2039 = load double, ptr %17, align 8
+  %2040 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 1
+  %2041 = load double, ptr %2040, align 8
+  %2042 = load double, ptr %21, align 8
+  %2043 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 1
+  %2044 = load double, ptr %2043, align 8
+  %2045 = fmul double %2042, %2044
+  %2046 = call double @llvm.fmuladd.f64(double %2039, double %2041, double %2045)
+  %2047 = load double, ptr %23, align 8
+  %2048 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 1
+  %2049 = load double, ptr %2048, align 8
+  %2050 = fneg double %2047
+  %2051 = call double @llvm.fmuladd.f64(double %2050, double %2049, double %2046)
+  %2052 = load double, ptr %19, align 8
+  %2053 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 1
+  %2054 = load double, ptr %2053, align 8
+  %2055 = fneg double %2052
+  %2056 = call double @llvm.fmuladd.f64(double %2055, double %2054, double %2051)
+  %2057 = load double, ptr %15, align 8
+  %2058 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 1
+  %2059 = load double, ptr %2058, align 8
+  %2060 = fneg double %2057
+  %2061 = call double @llvm.fmuladd.f64(double %2060, double %2059, double %2056)
+  %2062 = fneg double %2061
+  %2063 = getelementptr inbounds %struct.cmplx, ptr %63, i32 0, i32 0
+  store double %2062, ptr %2063, align 8
+  %2064 = getelementptr inbounds %struct.cmplx, ptr %62, i32 0, i32 0
+  %2065 = load double, ptr %2064, align 8
+  %2066 = getelementptr inbounds %struct.cmplx, ptr %63, i32 0, i32 0
+  %2067 = load double, ptr %2066, align 8
+  %2068 = fadd double %2065, %2067
+  %2069 = getelementptr inbounds %struct.cmplx, ptr %60, i32 0, i32 0
+  store double %2068, ptr %2069, align 8
+  %2070 = getelementptr inbounds %struct.cmplx, ptr %62, i32 0, i32 1
+  %2071 = load double, ptr %2070, align 8
+  %2072 = getelementptr inbounds %struct.cmplx, ptr %63, i32 0, i32 1
+  %2073 = load double, ptr %2072, align 8
+  %2074 = fadd double %2071, %2073
+  %2075 = getelementptr inbounds %struct.cmplx, ptr %60, i32 0, i32 1
+  store double %2074, ptr %2075, align 8
+  %2076 = getelementptr inbounds %struct.cmplx, ptr %62, i32 0, i32 0
+  %2077 = load double, ptr %2076, align 8
+  %2078 = getelementptr inbounds %struct.cmplx, ptr %63, i32 0, i32 0
+  %2079 = load double, ptr %2078, align 8
+  %2080 = fsub double %2077, %2079
+  %2081 = getelementptr inbounds %struct.cmplx, ptr %61, i32 0, i32 0
+  store double %2080, ptr %2081, align 8
+  %2082 = getelementptr inbounds %struct.cmplx, ptr %62, i32 0, i32 1
+  %2083 = load double, ptr %2082, align 8
+  %2084 = getelementptr inbounds %struct.cmplx, ptr %63, i32 0, i32 1
+  %2085 = load double, ptr %2084, align 8
+  %2086 = fsub double %2083, %2085
+  %2087 = getelementptr inbounds %struct.cmplx, ptr %61, i32 0, i32 1
+  store double %2086, ptr %2087, align 8
+  %2088 = load ptr, ptr %11, align 4
+  %2089 = load i32, ptr %44, align 4
+  %2090 = sub i32 %2089, 1
+  %2091 = load i32, ptr %7, align 4
+  %2092 = sub i32 %2091, 1
+  %2093 = mul i32 1, %2092
+  %2094 = add i32 %2090, %2093
+  %2095 = getelementptr inbounds %struct.cmplx, ptr %2088, i32 %2094
+  %2096 = getelementptr inbounds %struct.cmplx, ptr %2095, i32 0, i32 0
+  %2097 = load double, ptr %2096, align 8
+  %2098 = getelementptr inbounds %struct.cmplx, ptr %60, i32 0, i32 0
+  %2099 = load double, ptr %2098, align 8
+  %2100 = load i32, ptr %12, align 4
+  %2101 = sitofp i32 %2100 to double
+  %2102 = load ptr, ptr %11, align 4
+  %2103 = load i32, ptr %44, align 4
+  %2104 = sub i32 %2103, 1
+  %2105 = load i32, ptr %7, align 4
+  %2106 = sub i32 %2105, 1
+  %2107 = mul i32 1, %2106
+  %2108 = add i32 %2104, %2107
+  %2109 = getelementptr inbounds %struct.cmplx, ptr %2102, i32 %2108
+  %2110 = getelementptr inbounds %struct.cmplx, ptr %2109, i32 0, i32 1
+  %2111 = load double, ptr %2110, align 8
+  %2112 = fmul double %2101, %2111
+  %2113 = getelementptr inbounds %struct.cmplx, ptr %60, i32 0, i32 1
+  %2114 = load double, ptr %2113, align 8
+  %2115 = fmul double %2112, %2114
+  %2116 = fneg double %2115
+  %2117 = call double @llvm.fmuladd.f64(double %2097, double %2099, double %2116)
+  %2118 = load ptr, ptr %10, align 4
+  %2119 = load i32, ptr %44, align 4
+  %2120 = load i32, ptr %7, align 4
+  %2121 = load i32, ptr %24, align 4
+  %2122 = load i32, ptr %8, align 4
+  %2123 = mul i32 %2122, 2
+  %2124 = add i32 %2121, %2123
+  %2125 = mul i32 %2120, %2124
+  %2126 = add i32 %2119, %2125
+  %2127 = getelementptr inbounds %struct.cmplx, ptr %2118, i32 %2126
+  %2128 = getelementptr inbounds %struct.cmplx, ptr %2127, i32 0, i32 0
+  store double %2117, ptr %2128, align 8
+  %2129 = load ptr, ptr %11, align 4
+  %2130 = load i32, ptr %44, align 4
+  %2131 = sub i32 %2130, 1
+  %2132 = load i32, ptr %7, align 4
+  %2133 = sub i32 %2132, 1
+  %2134 = mul i32 1, %2133
+  %2135 = add i32 %2131, %2134
+  %2136 = getelementptr inbounds %struct.cmplx, ptr %2129, i32 %2135
+  %2137 = getelementptr inbounds %struct.cmplx, ptr %2136, i32 0, i32 0
+  %2138 = load double, ptr %2137, align 8
+  %2139 = getelementptr inbounds %struct.cmplx, ptr %60, i32 0, i32 1
+  %2140 = load double, ptr %2139, align 8
+  %2141 = load i32, ptr %12, align 4
+  %2142 = sitofp i32 %2141 to double
+  %2143 = load ptr, ptr %11, align 4
+  %2144 = load i32, ptr %44, align 4
+  %2145 = sub i32 %2144, 1
+  %2146 = load i32, ptr %7, align 4
+  %2147 = sub i32 %2146, 1
+  %2148 = mul i32 1, %2147
+  %2149 = add i32 %2145, %2148
+  %2150 = getelementptr inbounds %struct.cmplx, ptr %2143, i32 %2149
+  %2151 = getelementptr inbounds %struct.cmplx, ptr %2150, i32 0, i32 1
+  %2152 = load double, ptr %2151, align 8
+  %2153 = fmul double %2142, %2152
+  %2154 = getelementptr inbounds %struct.cmplx, ptr %60, i32 0, i32 0
+  %2155 = load double, ptr %2154, align 8
+  %2156 = fmul double %2153, %2155
+  %2157 = call double @llvm.fmuladd.f64(double %2138, double %2140, double %2156)
+  %2158 = load ptr, ptr %10, align 4
+  %2159 = load i32, ptr %44, align 4
+  %2160 = load i32, ptr %7, align 4
+  %2161 = load i32, ptr %24, align 4
+  %2162 = load i32, ptr %8, align 4
+  %2163 = mul i32 %2162, 2
+  %2164 = add i32 %2161, %2163
+  %2165 = mul i32 %2160, %2164
+  %2166 = add i32 %2159, %2165
+  %2167 = getelementptr inbounds %struct.cmplx, ptr %2158, i32 %2166
+  %2168 = getelementptr inbounds %struct.cmplx, ptr %2167, i32 0, i32 1
+  store double %2157, ptr %2168, align 8
+  %2169 = load ptr, ptr %11, align 4
+  %2170 = load i32, ptr %44, align 4
+  %2171 = sub i32 %2170, 1
+  %2172 = load i32, ptr %7, align 4
+  %2173 = sub i32 %2172, 1
+  %2174 = mul i32 8, %2173
+  %2175 = add i32 %2171, %2174
+  %2176 = getelementptr inbounds %struct.cmplx, ptr %2169, i32 %2175
+  %2177 = getelementptr inbounds %struct.cmplx, ptr %2176, i32 0, i32 0
+  %2178 = load double, ptr %2177, align 8
+  %2179 = getelementptr inbounds %struct.cmplx, ptr %61, i32 0, i32 0
+  %2180 = load double, ptr %2179, align 8
+  %2181 = load i32, ptr %12, align 4
+  %2182 = sitofp i32 %2181 to double
+  %2183 = load ptr, ptr %11, align 4
+  %2184 = load i32, ptr %44, align 4
+  %2185 = sub i32 %2184, 1
+  %2186 = load i32, ptr %7, align 4
+  %2187 = sub i32 %2186, 1
+  %2188 = mul i32 8, %2187
+  %2189 = add i32 %2185, %2188
+  %2190 = getelementptr inbounds %struct.cmplx, ptr %2183, i32 %2189
+  %2191 = getelementptr inbounds %struct.cmplx, ptr %2190, i32 0, i32 1
+  %2192 = load double, ptr %2191, align 8
+  %2193 = fmul double %2182, %2192
+  %2194 = getelementptr inbounds %struct.cmplx, ptr %61, i32 0, i32 1
+  %2195 = load double, ptr %2194, align 8
+  %2196 = fmul double %2193, %2195
+  %2197 = fneg double %2196
+  %2198 = call double @llvm.fmuladd.f64(double %2178, double %2180, double %2197)
+  %2199 = load ptr, ptr %10, align 4
+  %2200 = load i32, ptr %44, align 4
+  %2201 = load i32, ptr %7, align 4
+  %2202 = load i32, ptr %24, align 4
+  %2203 = load i32, ptr %8, align 4
+  %2204 = mul i32 %2203, 9
+  %2205 = add i32 %2202, %2204
+  %2206 = mul i32 %2201, %2205
+  %2207 = add i32 %2200, %2206
+  %2208 = getelementptr inbounds %struct.cmplx, ptr %2199, i32 %2207
+  %2209 = getelementptr inbounds %struct.cmplx, ptr %2208, i32 0, i32 0
+  store double %2198, ptr %2209, align 8
+  %2210 = load ptr, ptr %11, align 4
+  %2211 = load i32, ptr %44, align 4
+  %2212 = sub i32 %2211, 1
+  %2213 = load i32, ptr %7, align 4
+  %2214 = sub i32 %2213, 1
+  %2215 = mul i32 8, %2214
+  %2216 = add i32 %2212, %2215
+  %2217 = getelementptr inbounds %struct.cmplx, ptr %2210, i32 %2216
+  %2218 = getelementptr inbounds %struct.cmplx, ptr %2217, i32 0, i32 0
+  %2219 = load double, ptr %2218, align 8
+  %2220 = getelementptr inbounds %struct.cmplx, ptr %61, i32 0, i32 1
+  %2221 = load double, ptr %2220, align 8
+  %2222 = load i32, ptr %12, align 4
+  %2223 = sitofp i32 %2222 to double
+  %2224 = load ptr, ptr %11, align 4
+  %2225 = load i32, ptr %44, align 4
+  %2226 = sub i32 %2225, 1
+  %2227 = load i32, ptr %7, align 4
+  %2228 = sub i32 %2227, 1
+  %2229 = mul i32 8, %2228
+  %2230 = add i32 %2226, %2229
+  %2231 = getelementptr inbounds %struct.cmplx, ptr %2224, i32 %2230
+  %2232 = getelementptr inbounds %struct.cmplx, ptr %2231, i32 0, i32 1
+  %2233 = load double, ptr %2232, align 8
+  %2234 = fmul double %2223, %2233
+  %2235 = getelementptr inbounds %struct.cmplx, ptr %61, i32 0, i32 0
+  %2236 = load double, ptr %2235, align 8
+  %2237 = fmul double %2234, %2236
+  %2238 = call double @llvm.fmuladd.f64(double %2219, double %2221, double %2237)
+  %2239 = load ptr, ptr %10, align 4
+  %2240 = load i32, ptr %44, align 4
+  %2241 = load i32, ptr %7, align 4
+  %2242 = load i32, ptr %24, align 4
+  %2243 = load i32, ptr %8, align 4
+  %2244 = mul i32 %2243, 9
+  %2245 = add i32 %2242, %2244
+  %2246 = mul i32 %2241, %2245
+  %2247 = add i32 %2240, %2246
+  %2248 = getelementptr inbounds %struct.cmplx, ptr %2239, i32 %2247
+  %2249 = getelementptr inbounds %struct.cmplx, ptr %2248, i32 0, i32 1
+  store double %2238, ptr %2249, align 8
+  %2250 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 0
+  %2251 = load double, ptr %2250, align 8
+  %2252 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 0
+  %2253 = load double, ptr %2252, align 8
+  %2254 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %2253, double %2251)
+  %2255 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 0
+  %2256 = load double, ptr %2255, align 8
+  %2257 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %2256, double %2254)
+  %2258 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 0
+  %2259 = load double, ptr %2258, align 8
+  %2260 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %2259, double %2257)
+  %2261 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 0
+  %2262 = load double, ptr %2261, align 8
+  %2263 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %2262, double %2260)
+  %2264 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 0
+  %2265 = load double, ptr %2264, align 8
+  %2266 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %2265, double %2263)
+  %2267 = getelementptr inbounds %struct.cmplx, ptr %66, i32 0, i32 0
+  store double %2266, ptr %2267, align 8
+  %2268 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 1
+  %2269 = load double, ptr %2268, align 8
+  %2270 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 1
+  %2271 = load double, ptr %2270, align 8
+  %2272 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %2271, double %2269)
+  %2273 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 1
+  %2274 = load double, ptr %2273, align 8
+  %2275 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %2274, double %2272)
+  %2276 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 1
+  %2277 = load double, ptr %2276, align 8
+  %2278 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %2277, double %2275)
+  %2279 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 1
+  %2280 = load double, ptr %2279, align 8
+  %2281 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %2280, double %2278)
+  %2282 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 1
+  %2283 = load double, ptr %2282, align 8
+  %2284 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %2283, double %2281)
+  %2285 = getelementptr inbounds %struct.cmplx, ptr %66, i32 0, i32 1
+  store double %2284, ptr %2285, align 8
+  %2286 = load double, ptr %19, align 8
+  %2287 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 0
+  %2288 = load double, ptr %2287, align 8
+  %2289 = load double, ptr %23, align 8
+  %2290 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 0
+  %2291 = load double, ptr %2290, align 8
+  %2292 = fmul double %2289, %2291
+  %2293 = fneg double %2292
+  %2294 = call double @llvm.fmuladd.f64(double %2286, double %2288, double %2293)
+  %2295 = load double, ptr %17, align 8
+  %2296 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 0
+  %2297 = load double, ptr %2296, align 8
+  %2298 = fneg double %2295
+  %2299 = call double @llvm.fmuladd.f64(double %2298, double %2297, double %2294)
+  %2300 = load double, ptr %15, align 8
+  %2301 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 0
+  %2302 = load double, ptr %2301, align 8
+  %2303 = call double @llvm.fmuladd.f64(double %2300, double %2302, double %2299)
+  %2304 = load double, ptr %21, align 8
+  %2305 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 0
+  %2306 = load double, ptr %2305, align 8
+  %2307 = call double @llvm.fmuladd.f64(double %2304, double %2306, double %2303)
+  %2308 = getelementptr inbounds %struct.cmplx, ptr %67, i32 0, i32 1
+  store double %2307, ptr %2308, align 8
+  %2309 = load double, ptr %19, align 8
+  %2310 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 1
+  %2311 = load double, ptr %2310, align 8
+  %2312 = load double, ptr %23, align 8
+  %2313 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 1
+  %2314 = load double, ptr %2313, align 8
+  %2315 = fmul double %2312, %2314
+  %2316 = fneg double %2315
+  %2317 = call double @llvm.fmuladd.f64(double %2309, double %2311, double %2316)
+  %2318 = load double, ptr %17, align 8
+  %2319 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 1
+  %2320 = load double, ptr %2319, align 8
+  %2321 = fneg double %2318
+  %2322 = call double @llvm.fmuladd.f64(double %2321, double %2320, double %2317)
+  %2323 = load double, ptr %15, align 8
+  %2324 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 1
+  %2325 = load double, ptr %2324, align 8
+  %2326 = call double @llvm.fmuladd.f64(double %2323, double %2325, double %2322)
+  %2327 = load double, ptr %21, align 8
+  %2328 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 1
+  %2329 = load double, ptr %2328, align 8
+  %2330 = call double @llvm.fmuladd.f64(double %2327, double %2329, double %2326)
+  %2331 = fneg double %2330
+  %2332 = getelementptr inbounds %struct.cmplx, ptr %67, i32 0, i32 0
+  store double %2331, ptr %2332, align 8
+  %2333 = getelementptr inbounds %struct.cmplx, ptr %66, i32 0, i32 0
+  %2334 = load double, ptr %2333, align 8
+  %2335 = getelementptr inbounds %struct.cmplx, ptr %67, i32 0, i32 0
+  %2336 = load double, ptr %2335, align 8
+  %2337 = fadd double %2334, %2336
+  %2338 = getelementptr inbounds %struct.cmplx, ptr %64, i32 0, i32 0
+  store double %2337, ptr %2338, align 8
+  %2339 = getelementptr inbounds %struct.cmplx, ptr %66, i32 0, i32 1
+  %2340 = load double, ptr %2339, align 8
+  %2341 = getelementptr inbounds %struct.cmplx, ptr %67, i32 0, i32 1
+  %2342 = load double, ptr %2341, align 8
+  %2343 = fadd double %2340, %2342
+  %2344 = getelementptr inbounds %struct.cmplx, ptr %64, i32 0, i32 1
+  store double %2343, ptr %2344, align 8
+  %2345 = getelementptr inbounds %struct.cmplx, ptr %66, i32 0, i32 0
+  %2346 = load double, ptr %2345, align 8
+  %2347 = getelementptr inbounds %struct.cmplx, ptr %67, i32 0, i32 0
+  %2348 = load double, ptr %2347, align 8
+  %2349 = fsub double %2346, %2348
+  %2350 = getelementptr inbounds %struct.cmplx, ptr %65, i32 0, i32 0
+  store double %2349, ptr %2350, align 8
+  %2351 = getelementptr inbounds %struct.cmplx, ptr %66, i32 0, i32 1
+  %2352 = load double, ptr %2351, align 8
+  %2353 = getelementptr inbounds %struct.cmplx, ptr %67, i32 0, i32 1
+  %2354 = load double, ptr %2353, align 8
+  %2355 = fsub double %2352, %2354
+  %2356 = getelementptr inbounds %struct.cmplx, ptr %65, i32 0, i32 1
+  store double %2355, ptr %2356, align 8
+  %2357 = load ptr, ptr %11, align 4
+  %2358 = load i32, ptr %44, align 4
+  %2359 = sub i32 %2358, 1
+  %2360 = load i32, ptr %7, align 4
+  %2361 = sub i32 %2360, 1
+  %2362 = mul i32 2, %2361
+  %2363 = add i32 %2359, %2362
+  %2364 = getelementptr inbounds %struct.cmplx, ptr %2357, i32 %2363
+  %2365 = getelementptr inbounds %struct.cmplx, ptr %2364, i32 0, i32 0
+  %2366 = load double, ptr %2365, align 8
+  %2367 = getelementptr inbounds %struct.cmplx, ptr %64, i32 0, i32 0
+  %2368 = load double, ptr %2367, align 8
+  %2369 = load i32, ptr %12, align 4
+  %2370 = sitofp i32 %2369 to double
+  %2371 = load ptr, ptr %11, align 4
+  %2372 = load i32, ptr %44, align 4
+  %2373 = sub i32 %2372, 1
+  %2374 = load i32, ptr %7, align 4
+  %2375 = sub i32 %2374, 1
+  %2376 = mul i32 2, %2375
+  %2377 = add i32 %2373, %2376
+  %2378 = getelementptr inbounds %struct.cmplx, ptr %2371, i32 %2377
+  %2379 = getelementptr inbounds %struct.cmplx, ptr %2378, i32 0, i32 1
+  %2380 = load double, ptr %2379, align 8
+  %2381 = fmul double %2370, %2380
+  %2382 = getelementptr inbounds %struct.cmplx, ptr %64, i32 0, i32 1
+  %2383 = load double, ptr %2382, align 8
+  %2384 = fmul double %2381, %2383
+  %2385 = fneg double %2384
+  %2386 = call double @llvm.fmuladd.f64(double %2366, double %2368, double %2385)
+  %2387 = load ptr, ptr %10, align 4
+  %2388 = load i32, ptr %44, align 4
+  %2389 = load i32, ptr %7, align 4
+  %2390 = load i32, ptr %24, align 4
+  %2391 = load i32, ptr %8, align 4
+  %2392 = mul i32 %2391, 3
+  %2393 = add i32 %2390, %2392
+  %2394 = mul i32 %2389, %2393
+  %2395 = add i32 %2388, %2394
+  %2396 = getelementptr inbounds %struct.cmplx, ptr %2387, i32 %2395
+  %2397 = getelementptr inbounds %struct.cmplx, ptr %2396, i32 0, i32 0
+  store double %2386, ptr %2397, align 8
+  %2398 = load ptr, ptr %11, align 4
+  %2399 = load i32, ptr %44, align 4
+  %2400 = sub i32 %2399, 1
+  %2401 = load i32, ptr %7, align 4
+  %2402 = sub i32 %2401, 1
+  %2403 = mul i32 2, %2402
+  %2404 = add i32 %2400, %2403
+  %2405 = getelementptr inbounds %struct.cmplx, ptr %2398, i32 %2404
+  %2406 = getelementptr inbounds %struct.cmplx, ptr %2405, i32 0, i32 0
+  %2407 = load double, ptr %2406, align 8
+  %2408 = getelementptr inbounds %struct.cmplx, ptr %64, i32 0, i32 1
+  %2409 = load double, ptr %2408, align 8
+  %2410 = load i32, ptr %12, align 4
+  %2411 = sitofp i32 %2410 to double
+  %2412 = load ptr, ptr %11, align 4
+  %2413 = load i32, ptr %44, align 4
+  %2414 = sub i32 %2413, 1
+  %2415 = load i32, ptr %7, align 4
+  %2416 = sub i32 %2415, 1
+  %2417 = mul i32 2, %2416
+  %2418 = add i32 %2414, %2417
+  %2419 = getelementptr inbounds %struct.cmplx, ptr %2412, i32 %2418
+  %2420 = getelementptr inbounds %struct.cmplx, ptr %2419, i32 0, i32 1
+  %2421 = load double, ptr %2420, align 8
+  %2422 = fmul double %2411, %2421
+  %2423 = getelementptr inbounds %struct.cmplx, ptr %64, i32 0, i32 0
+  %2424 = load double, ptr %2423, align 8
+  %2425 = fmul double %2422, %2424
+  %2426 = call double @llvm.fmuladd.f64(double %2407, double %2409, double %2425)
+  %2427 = load ptr, ptr %10, align 4
+  %2428 = load i32, ptr %44, align 4
+  %2429 = load i32, ptr %7, align 4
+  %2430 = load i32, ptr %24, align 4
+  %2431 = load i32, ptr %8, align 4
+  %2432 = mul i32 %2431, 3
+  %2433 = add i32 %2430, %2432
+  %2434 = mul i32 %2429, %2433
+  %2435 = add i32 %2428, %2434
+  %2436 = getelementptr inbounds %struct.cmplx, ptr %2427, i32 %2435
+  %2437 = getelementptr inbounds %struct.cmplx, ptr %2436, i32 0, i32 1
+  store double %2426, ptr %2437, align 8
+  %2438 = load ptr, ptr %11, align 4
+  %2439 = load i32, ptr %44, align 4
+  %2440 = sub i32 %2439, 1
+  %2441 = load i32, ptr %7, align 4
+  %2442 = sub i32 %2441, 1
+  %2443 = mul i32 7, %2442
+  %2444 = add i32 %2440, %2443
+  %2445 = getelementptr inbounds %struct.cmplx, ptr %2438, i32 %2444
+  %2446 = getelementptr inbounds %struct.cmplx, ptr %2445, i32 0, i32 0
+  %2447 = load double, ptr %2446, align 8
+  %2448 = getelementptr inbounds %struct.cmplx, ptr %65, i32 0, i32 0
+  %2449 = load double, ptr %2448, align 8
+  %2450 = load i32, ptr %12, align 4
+  %2451 = sitofp i32 %2450 to double
+  %2452 = load ptr, ptr %11, align 4
+  %2453 = load i32, ptr %44, align 4
+  %2454 = sub i32 %2453, 1
+  %2455 = load i32, ptr %7, align 4
+  %2456 = sub i32 %2455, 1
+  %2457 = mul i32 7, %2456
+  %2458 = add i32 %2454, %2457
+  %2459 = getelementptr inbounds %struct.cmplx, ptr %2452, i32 %2458
+  %2460 = getelementptr inbounds %struct.cmplx, ptr %2459, i32 0, i32 1
+  %2461 = load double, ptr %2460, align 8
+  %2462 = fmul double %2451, %2461
+  %2463 = getelementptr inbounds %struct.cmplx, ptr %65, i32 0, i32 1
+  %2464 = load double, ptr %2463, align 8
+  %2465 = fmul double %2462, %2464
+  %2466 = fneg double %2465
+  %2467 = call double @llvm.fmuladd.f64(double %2447, double %2449, double %2466)
+  %2468 = load ptr, ptr %10, align 4
+  %2469 = load i32, ptr %44, align 4
+  %2470 = load i32, ptr %7, align 4
+  %2471 = load i32, ptr %24, align 4
+  %2472 = load i32, ptr %8, align 4
+  %2473 = mul i32 %2472, 8
+  %2474 = add i32 %2471, %2473
+  %2475 = mul i32 %2470, %2474
+  %2476 = add i32 %2469, %2475
+  %2477 = getelementptr inbounds %struct.cmplx, ptr %2468, i32 %2476
+  %2478 = getelementptr inbounds %struct.cmplx, ptr %2477, i32 0, i32 0
+  store double %2467, ptr %2478, align 8
+  %2479 = load ptr, ptr %11, align 4
+  %2480 = load i32, ptr %44, align 4
+  %2481 = sub i32 %2480, 1
+  %2482 = load i32, ptr %7, align 4
+  %2483 = sub i32 %2482, 1
+  %2484 = mul i32 7, %2483
+  %2485 = add i32 %2481, %2484
+  %2486 = getelementptr inbounds %struct.cmplx, ptr %2479, i32 %2485
+  %2487 = getelementptr inbounds %struct.cmplx, ptr %2486, i32 0, i32 0
+  %2488 = load double, ptr %2487, align 8
+  %2489 = getelementptr inbounds %struct.cmplx, ptr %65, i32 0, i32 1
+  %2490 = load double, ptr %2489, align 8
+  %2491 = load i32, ptr %12, align 4
+  %2492 = sitofp i32 %2491 to double
+  %2493 = load ptr, ptr %11, align 4
+  %2494 = load i32, ptr %44, align 4
+  %2495 = sub i32 %2494, 1
+  %2496 = load i32, ptr %7, align 4
+  %2497 = sub i32 %2496, 1
+  %2498 = mul i32 7, %2497
+  %2499 = add i32 %2495, %2498
+  %2500 = getelementptr inbounds %struct.cmplx, ptr %2493, i32 %2499
+  %2501 = getelementptr inbounds %struct.cmplx, ptr %2500, i32 0, i32 1
+  %2502 = load double, ptr %2501, align 8
+  %2503 = fmul double %2492, %2502
+  %2504 = getelementptr inbounds %struct.cmplx, ptr %65, i32 0, i32 0
+  %2505 = load double, ptr %2504, align 8
+  %2506 = fmul double %2503, %2505
+  %2507 = call double @llvm.fmuladd.f64(double %2488, double %2490, double %2506)
+  %2508 = load ptr, ptr %10, align 4
+  %2509 = load i32, ptr %44, align 4
+  %2510 = load i32, ptr %7, align 4
+  %2511 = load i32, ptr %24, align 4
+  %2512 = load i32, ptr %8, align 4
+  %2513 = mul i32 %2512, 8
+  %2514 = add i32 %2511, %2513
+  %2515 = mul i32 %2510, %2514
+  %2516 = add i32 %2509, %2515
+  %2517 = getelementptr inbounds %struct.cmplx, ptr %2508, i32 %2516
+  %2518 = getelementptr inbounds %struct.cmplx, ptr %2517, i32 0, i32 1
+  store double %2507, ptr %2518, align 8
+  %2519 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 0
+  %2520 = load double, ptr %2519, align 8
+  %2521 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 0
+  %2522 = load double, ptr %2521, align 8
+  %2523 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %2522, double %2520)
+  %2524 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 0
+  %2525 = load double, ptr %2524, align 8
+  %2526 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %2525, double %2523)
+  %2527 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 0
+  %2528 = load double, ptr %2527, align 8
+  %2529 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %2528, double %2526)
+  %2530 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 0
+  %2531 = load double, ptr %2530, align 8
+  %2532 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %2531, double %2529)
+  %2533 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 0
+  %2534 = load double, ptr %2533, align 8
+  %2535 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %2534, double %2532)
+  %2536 = getelementptr inbounds %struct.cmplx, ptr %70, i32 0, i32 0
+  store double %2535, ptr %2536, align 8
+  %2537 = getelementptr inbounds %struct.cmplx, ptr %45, i32 0, i32 1
+  %2538 = load double, ptr %2537, align 8
+  %2539 = getelementptr inbounds %struct.cmplx, ptr %46, i32 0, i32 1
+  %2540 = load double, ptr %2539, align 8
+  %2541 = call double @llvm.fmuladd.f64(double 0xBFE4F49E7F775887, double %2540, double %2538)
+  %2542 = getelementptr inbounds %struct.cmplx, ptr %47, i32 0, i32 1
+  %2543 = load double, ptr %2542, align 8
+  %2544 = call double @llvm.fmuladd.f64(double 0xBFC2375F640F44DB, double %2543, double %2541)
+  %2545 = getelementptr inbounds %struct.cmplx, ptr %48, i32 0, i32 1
+  %2546 = load double, ptr %2545, align 8
+  %2547 = call double @llvm.fmuladd.f64(double 0x3FEAEB8C8764F0BA, double %2546, double %2544)
+  %2548 = getelementptr inbounds %struct.cmplx, ptr %49, i32 0, i32 1
+  %2549 = load double, ptr %2548, align 8
+  %2550 = call double @llvm.fmuladd.f64(double 0xBFEEB42A9BCD5057, double %2549, double %2547)
+  %2551 = getelementptr inbounds %struct.cmplx, ptr %50, i32 0, i32 1
+  %2552 = load double, ptr %2551, align 8
+  %2553 = call double @llvm.fmuladd.f64(double 0x3FDA9628D9C712B6, double %2552, double %2550)
+  %2554 = getelementptr inbounds %struct.cmplx, ptr %70, i32 0, i32 1
+  store double %2553, ptr %2554, align 8
+  %2555 = load double, ptr %21, align 8
+  %2556 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 0
+  %2557 = load double, ptr %2556, align 8
+  %2558 = load double, ptr %19, align 8
+  %2559 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 0
+  %2560 = load double, ptr %2559, align 8
+  %2561 = fmul double %2558, %2560
+  %2562 = fneg double %2561
+  %2563 = call double @llvm.fmuladd.f64(double %2555, double %2557, double %2562)
+  %2564 = load double, ptr %15, align 8
+  %2565 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 0
+  %2566 = load double, ptr %2565, align 8
+  %2567 = call double @llvm.fmuladd.f64(double %2564, double %2566, double %2563)
+  %2568 = load double, ptr %23, align 8
+  %2569 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 0
+  %2570 = load double, ptr %2569, align 8
+  %2571 = call double @llvm.fmuladd.f64(double %2568, double %2570, double %2567)
+  %2572 = load double, ptr %17, align 8
+  %2573 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 0
+  %2574 = load double, ptr %2573, align 8
+  %2575 = fneg double %2572
+  %2576 = call double @llvm.fmuladd.f64(double %2575, double %2574, double %2571)
+  %2577 = getelementptr inbounds %struct.cmplx, ptr %71, i32 0, i32 1
+  store double %2576, ptr %2577, align 8
+  %2578 = load double, ptr %21, align 8
+  %2579 = getelementptr inbounds %struct.cmplx, ptr %55, i32 0, i32 1
+  %2580 = load double, ptr %2579, align 8
+  %2581 = load double, ptr %19, align 8
+  %2582 = getelementptr inbounds %struct.cmplx, ptr %54, i32 0, i32 1
+  %2583 = load double, ptr %2582, align 8
+  %2584 = fmul double %2581, %2583
+  %2585 = fneg double %2584
+  %2586 = call double @llvm.fmuladd.f64(double %2578, double %2580, double %2585)
+  %2587 = load double, ptr %15, align 8
+  %2588 = getelementptr inbounds %struct.cmplx, ptr %53, i32 0, i32 1
+  %2589 = load double, ptr %2588, align 8
+  %2590 = call double @llvm.fmuladd.f64(double %2587, double %2589, double %2586)
+  %2591 = load double, ptr %23, align 8
+  %2592 = getelementptr inbounds %struct.cmplx, ptr %52, i32 0, i32 1
+  %2593 = load double, ptr %2592, align 8
+  %2594 = call double @llvm.fmuladd.f64(double %2591, double %2593, double %2590)
+  %2595 = load double, ptr %17, align 8
+  %2596 = getelementptr inbounds %struct.cmplx, ptr %51, i32 0, i32 1
+  %2597 = load double, ptr %2596, align 8
+  %2598 = fneg double %2595
+  %2599 = call double @llvm.fmuladd.f64(double %2598, double %2597, double %2594)
+  %2600 = fneg double %2599
+  %2601 = getelementptr inbounds %struct.cmplx, ptr %71, i32 0, i32 0
+  store double %2600, ptr %2601, align 8
+  %2602 = getelementptr inbounds %struct.cmplx, ptr %70, i32 0, i32 0
+  %2603 = load double, ptr %2602, align 8
+  %2604 = getelementptr inbounds %struct.cmplx, ptr %71, i32 0, i32 0
+  %2605 = load double, ptr %2604, align 8
+  %2606 = fadd double %2603, %2605
+  %2607 = getelementptr inbounds %struct.cmplx, ptr %68, i32 0, i32 0
+  store double %2606, ptr %2607, align 8
+  %2608 = getelementptr inbounds %struct.cmplx, ptr %70, i32 0, i32 1
+  %2609 = load double, ptr %2608, align 8
+  %2610 = getelementptr inbounds %struct.cmplx, ptr %71, i32 0, i32 1
+  %2611 = load double, ptr %2610, align 8
+  %2612 = fadd double %2609, %2611
+  %2613 = getelementptr inbounds %struct.cmplx, ptr %68, i32 0, i32 1
+  store double %2612, ptr %2613, align 8
+  %2614 = getelementptr inbounds %struct.cmplx, ptr %70, i32 0, i32 0
+  %2615 = load double, ptr %2614, align 8
+  %2616 = getelementptr inbounds %struct.cmplx, ptr %71, i32 0, i32 0
+  %2617 = load double, ptr %2616, align 8
+  %2618 = fsub double %2615, %2617
+  %2619 = getelementptr inbounds %struct.cmplx, ptr %69, i32 0, i32 0
+  store double %2618, ptr %2619, align 8
+  %2620 = getelementptr inbounds %struct.cmplx, ptr %70, i32 0, i32 1
+  %2621 = load double, ptr %2620, align 8
+  %2622 = getelementptr inbounds %struct.cmplx, ptr %71, i32 0, i32 1
+  %2623 = load double, ptr %2622, align 8
+  %2624 = fsub double %2621, %2623
+  %2625 = getelementptr inbounds %struct.cmplx, ptr %69, i32 0, i32 1
+  store double %2624, ptr %2625, align 8
+  %2626 = load ptr, ptr %11, align 4
+  %2627 = load i32, ptr %44, align 4
+  %2628 = sub i32 %2627, 1
+  %2629 = load i32, ptr %7, align 4
+  %2630 = sub i32 %2629, 1
+  %2631 = mul i32 3, %2630
+  %2632 = add i32 %2628, %2631
+  %2633 = getelementptr inbounds %struct.cmplx, ptr %2626, i32 %2632
+  %2634 = getelementptr inbounds %struct.cmplx, ptr %2633, i32 0, i32 0
+  %2635 = load double, ptr %2634, align 8
+  %2636 = getelementptr inbounds %struct.cmplx, ptr %68, i32 0, i32 0
+  %2637 = load double, ptr %2636, align 8
+  %2638 = load i32, ptr %12, align 4
+  %2639 = sitofp i32 %2638 to double
+  %2640 = load ptr, ptr %11, align 4
+  %2641 = load i32, ptr %44, align 4
+  %2642 = sub i32 %2641, 1
+  %2643 = load i32, ptr %7, align 4
+  %2644 = sub i32 %2643, 1
+  %2645 = mul i32 3, %2644
+  %2646 = add i32 %2642, %2645
+  %2647 = getelementptr inbounds %struct.cmplx, ptr %2640, i32 %2646
+  %2648 = getelementptr inbounds %struct.cmplx, ptr %2647, i32 0, i32 1
+  %2649 = load double, ptr %2648, align 8
+  %2650 = fmul double %2639, %2649
+  %2651 = getelementptr inbounds %struct.cmplx, ptr %68, i32 0, i32 1
+  %2652 = load double, ptr %2651, align 8
+  %2653 = fmul double %2650, %2652
+  %2654 = fneg double %2653
+  %2655 = call double @llvm.fmuladd.f64(double %2635, double %2637, double %2654)
+  %2656 = load ptr, ptr %10, align 4
+  %2657 = load i32, ptr %44, align 4
+  %2658 = load i32, ptr %7, align 4
+  %2659 = load i32, ptr %24, align 4
+  %2660 = load i32, ptr %8, align 4
+  %2661 = mul i32 %2660, 4
+  %2662 = add i32 %2659, %2661
+  %2663 = mul i32 %2658, %2662
+  %2664 = add i32 %2657, %2663
+  %2665 = getelementptr inbounds %struct.cmplx, ptr %2656, i32 %2664
+  %2666 = getelementptr inbounds %struct.cmplx, ptr %2665, i32 0, i32 0
+  store double %2655, ptr %2666, align 8
+  %2667 = load ptr, ptr %11, align 4
+  %2668 = load i32, ptr %44, align 4
+  %2669 = sub i32 %2668, 1
+  %2670 = load i32, ptr %7, align 4
+  %2671 = sub i32 %2670, 1
+  %2672 = mul i32 3, %2671
+  %2673 = add i32 %2669, %2672
+  %2674 = getelementptr inbounds %struct.cmplx, ptr %2667, i32 %2673
+  %2675 = getelementptr inbounds %struct.cmplx, ptr %2674, i32 0, i32 0
+  %2676 = load double, ptr %2675, align 8
+  %2677 = getelementptr inbounds %struct.cmplx, ptr %68, i32 0, i32 1
+  %2678 = load double, ptr %2677, align 8
+  %2679 = load i32, ptr %12, align 4
+  %2680 = sitofp i32 %2679 to double
+  %2681 = load ptr, ptr %11, align 4
+  %2682 = load i32, ptr %44, align 4
+  %2683 = sub i32 %2682, 1
+  %2684 = load i32, ptr %7, align 4
+  %2685 = sub i32 %2684, 1
+  %2686 = mul i32 3, %2685
+  %2687 = add i32 %2683, %2686
+  %2688 = getelementptr inbounds %struct.cmplx, ptr %2681, i32 %2687
+  %2689 = getelementptr inbounds %struct.cmplx, ptr %2688, i32 0, i32 1
+  %2690 = load double, ptr %2689, align 8
+  %2691 = fmul double %2680, %2690
+  %2692 = getelementptr inbounds %struct.cmplx, ptr %68, i32 0, i32 0
+  %2693 = load double, ptr %2692, align 8
+  %2694 = fmul double %2691, %2693
+  %2695 = call double @llvm.fmuladd.f64(double %2676, double %2678, double %2694)
+  %2696 = load ptr, ptr %10, align 4
+  %2697 = load i32, ptr %44, align 4
+  %2698 = load i32, ptr %7, align 4
+  %2699 = load i32, ptr %24, align 4
+  %2700 = load i32, ptr %8, align 4
+  %2701 = mul i32 %2700, 4
+  %2702 = add i32 %2699, %2701
+  %2703 = mul i32 %2698, %2702
+  %2704 = add i32 %2697, %2703
+  %2705 = getelementptr inbounds %struct.cmplx, ptr %2696, i32 %2704
+  %2706 = getelementptr inbounds %struct.cmplx, ptr %2705, i32 0, i32 1
+  store double %2695, ptr %2706, align 8
+  %2707 = load ptr, ptr %11, align 4
+  %2708 = load i32, ptr %44, align 4
+  %2709 = sub i32 %2708, 1
+  %2710 = load i32, ptr %7, align 4
+  %2711 = sub i32 %2710, 1
+  %2712 = mul i32 6, %2711
+  %2713 = add i32 %2709, %2712
+  %2714 = getelementptr inbounds %struct.cmplx, ptr %2707, i32 %2713
+  %2715 = getelementptr inbounds %struct.cmplx, ptr %2714, i32 0, i32 0
+  %2716 = load double, ptr %2715, align 8
+  %2717 = getelementptr inbounds %struct.cmplx, ptr %69, i32 0, i32 0
+  %2718 = load double, ptr %2717, align 8
+  %2719 = load i32, ptr %12, align 4
+  %2720 = sitofp i32 %2719 to double
+  %2721 = load ptr, ptr %11, align 4
+  %2722 = load i32, ptr %44, align 4
+  %2723 = sub i32 %2722, 1
+  %2724 = load i32, ptr %7, align 4
+  %2725 = sub i32 %2724, 1
+  %2726 = mul i32 6, %2725
+  %2727 = add i32 %2723, %2726
+  %2728 = getelementptr inbounds %struct.cmplx, ptr %2721, i32 %2727
+  %2729 = getelementptr inbounds %struct.cmplx, ptr %2728, i32 0, i32 1
+  %2730 = load double, ptr %2729, align 8
+  %2731 = fmul double %2720, %2730
+  %2732 = getelementptr inbounds %struct.cmplx, ptr %69, i32 0, i32 1
+  %2733 = load double, ptr %2732, align 8
+  %2734 = fmul double %2731, %2733
+  %2735 = fneg double %2734
+  %2736 = call double @llvm.fmuladd.f64(double %2716, double %2718, double %2735)
+  %2737 = load ptr, ptr %10, align 4
+  %2738 = load i32, ptr %44, align 4
+  %2739 = load i32, ptr %7, align 4
+  %2740 = load i32, ptr %24, align 4
+  %2741 = load i32, ptr %8, align 4
+  %2742 = mul i32 %2741, 7
+  %2743 = add i32 %2740, %2742
+  %2744 = mul i32 %2739, %2743
+  %2745 = add i32 %2738, %2744
+  %2746 = getelementptr inbounds %struct.cmplx, ptr %2737, i32 %2745
+  %2747 = getelementptr inbounds %struct.cmplx, ptr %2746, i32 0, i32 0
+  store double %2736, ptr %2747, align 8
+  %2748 = load ptr, ptr %11, align 4
+  %2749 = load i32, ptr %44, align 4
+  %2750 = sub i32 %2749, 1
+  %2751 = load i32, ptr %7, align 4
+  %2752 = sub i32 %2751, 1
+  %2753 = mul i32 6, %2752
+  %2754 = add i32 %2750, %2753
+  %2755 = getelementptr inbounds %struct.cmplx, ptr %2748, i32 %2754
+  %2756 = getelementptr inbounds %struct.cmplx, ptr %2755, i32 0, i32 0
+  %2757 = load double, ptr %2756, align 8
+  %2758 = getelementptr inbounds %struct.cmplx, ptr %69, i32 0, i32 1
+  %2759 = load double, ptr %2758, align 8
+  %2760 = load i32, ptr %12, align 4
+  %2761 = sitofp i32 %2760 to double
+  %2762 = load ptr, ptr %11, align 4
+  %2763 = load i32, ptr %44, align 4
+  %2764 = sub i32 %2763, 1
+  %2765 = load i32, ptr %7, align 4
+  %2766 = sub i32 %2765, 1
+  %2767 = mul i32 6, %2766
+  %2768 = add i32 %2764, %2767
+  %2769 = getelementptr inbounds %struct.cmplx, ptr %2762, i32 %2768
+  %2770 = getelementptr inbounds %struct.cmplx, ptr %2769, i32 0, i32 1
+  %2771 = load double, ptr %2770, align 8
+  %2772 = fmul double %2761, %2771
+  %2773 = getelementptr inbounds %struct.cmplx, ptr %69, i32 0, i32 0
+  %2774 = load double, ptr %2773, align 8
+  %2775 = fmul double %2772, %2774
+  %2776 = call double @llvm.fmuladd.f64(double %2757, double %2759, double %2775)
+  %2777 = load ptr, ptr %10, align 4
+  %2778 = load i32, ptr %44, align 4
+  %2779 = load i32, ptr %7, align 4
+  %2780 = load i32, ptr %24, align 4
+  %2781 = load i32, ptr %8, align 4
+  %2782 = mul i32 %2781, 7
+  %2783 = add i32 %2780, %2782
+  %2784 = mul i32 %2779, %2783
+  %2785 = add i32 %2778, %2784
+  %2786 = getelementptr inbounds %struct.cmplx, ptr %2777, i32 %2785
+  %2787 = getelementptr inbounds %struct.cmplx, ptr %2786, i32 0, i32 1
+  store double %2776, ptr %2787, align 8
+  br label %2788
+
+2788:                                             ; preds = %1168
+  %2789 = load i32, ptr %44, align 4
+  %2790 = add i32 %2789, 1
+  store i32 %2790, ptr %44, align 4
+  br label %1164, !llvm.loop !4
+
+2791:                                             ; preds = %1164
+  br label %2792
+
+2792:                                             ; preds = %2791
+  %2793 = load i32, ptr %24, align 4
+  %2794 = add i32 %2793, 1
+  store i32 %2794, ptr %24, align 4
+  br label %87, !llvm.loop !6
+
+2795:                                             ; preds = %87
+  ret void
+}
+
+; Function Attrs: nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) #2
+
+; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare double @llvm.fmuladd.f64(double, double, double) #3
+
+attributes #0 = { noinline nounwind optnone uwtable "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="e500" "target-features"="+spe,-altivec,-bpermd,-crbits,-crypto,-direct-move,-extdiv,-htm,-isa-v206-instructions,-isa-v207-instructions,-isa-v30-instructions,-power8-vector,-power9-vector,-privileged,-quadword-atomics,-rop-protect,-vsx" }
+attributes #1 = { "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="e500" "target-features"="+spe,-altivec,-bpermd,-crbits,-crypto,-direct-move,-extdiv,-htm,-isa-v206-instructions,-isa-v207-instructions,-isa-v30-instructions,-power8-vector,-power9-vector,-privileged,-quadword-atomics,-rop-protect,-vsx" }
+attributes #2 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+attributes #3 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+
+!llvm.module.flags = !{!0, !1, !2}
+!llvm.ident = !{!3}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"uwtable", i32 2}
+!2 = !{i32 7, !"frame-pointer", i32 2}
+!3 = !{!"clang version 17.0.0 (https://github.com/llvm/llvm-project.git 69db592f762ade86508826a7b3c9d5434c4837e2)"}
+!4 = distinct !{!4, !5}
+!5 = !{!"llvm.loop.mustprogress"}
+!6 = distinct !{!6, !5}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SPE: {{.*}}