Index: lib/Target/ARM/ARM.h
===================================================================
--- lib/Target/ARM/ARM.h
+++ lib/Target/ARM/ARM.h
@@ -35,7 +35,7 @@
 class MCInst;
 class PassRegistry;
-
+FunctionPass *createARMFinalizeLoopsPass();
 Pass *createARMParallelDSPPass();
 FunctionPass *createARMISelDag(ARMBaseTargetMachine &TM,
                                CodeGenOpt::Level OptLevel);
@@ -65,6 +65,7 @@
 void initializeARMExpandPseudoPass(PassRegistry &);
 void initializeThumb2SizeReducePass(PassRegistry &);
 void initializeMVEVPTBlockPass(PassRegistry &);
+void initializeARMFinalizeLoopsPass(PassRegistry &);
 
 } // end namespace llvm
 
Index: lib/Target/ARM/ARMFinalizeLoops.cpp
===================================================================
--- /dev/null
+++ lib/Target/ARM/ARMFinalizeLoops.cpp
@@ -0,0 +1,276 @@
+//===-- ARMFinalizeLoops.cpp - Low-overhead Loops --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Finalize v8.1-m low-overhead loops by converting the associated pseudo
+/// instructions into machine operations.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ARM.h"
+#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
+#include "ARMBasicBlockInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "arm-finalize-loops"
+#define ARM_FINALIZE_LOOPS_NAME "ARM loop finalization pass"
+
+namespace {
+
+  class ARMFinalizeLoops : public MachineFunctionPass {
+    const ARMBaseInstrInfo *TII = nullptr;
+    MachineRegisterInfo *MRI = nullptr;
+    std::unique_ptr<ARMBasicBlockUtils> BBUtils = nullptr;
+
+  public:
+    static char ID;
+
+    ARMFinalizeLoops() : MachineFunctionPass(ID) { }
+
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
+      AU.setPreservesCFG();
+      AU.addRequired<MachineLoopInfo>();
+      MachineFunctionPass::getAnalysisUsage(AU);
+    }
+
+    bool runOnMachineFunction(MachineFunction &MF) override;
+
+    bool ProcessLoop(MachineLoop *ML);
+
+    void Expand(MachineLoop *ML, MachineInstr *Start,
+                MachineInstr *Dec, MachineInstr *End, bool Revert);
+
+    MachineFunctionProperties getRequiredProperties() const override {
+      return MachineFunctionProperties().set(
+          MachineFunctionProperties::Property::NoVRegs);
+    }
+
+    StringRef getPassName() const override {
+      return ARM_FINALIZE_LOOPS_NAME;
+    }
+  };
+}
+
+char ARMFinalizeLoops::ID = 0;
+
+INITIALIZE_PASS(ARMFinalizeLoops, DEBUG_TYPE, ARM_FINALIZE_LOOPS_NAME, false,
+                false)
+
+bool ARMFinalizeLoops::runOnMachineFunction(MachineFunction &MF) {
+  LLVM_DEBUG(dbgs() << "ARM Loops on " << MF.getName() << " ------------- \n");
+
+  auto &MLI = getAnalysis<MachineLoopInfo>();
+  MRI = &MF.getRegInfo();
+  TII = static_cast<const ARMBaseInstrInfo*>(
+      MF.getSubtarget().getInstrInfo());
+  BBUtils = std::unique_ptr<ARMBasicBlockUtils>(new ARMBasicBlockUtils(MF));
+  BBUtils->computeAllBlockSizes();
+
+  bool Changed = false;
+  for (auto ML : MLI) {
+    if (!ML->getParentLoop())
+      Changed |= ProcessLoop(ML);
+  }
+  return Changed;
+}
+
+bool ARMFinalizeLoops::ProcessLoop(MachineLoop *ML) {
+
+  bool Changed = false;
+
+  // Process inner loops first.
+  for (auto I = ML->begin(), E = ML->end(); I != E; ++I)
+    Changed |= ProcessLoop(*I);
+
+  LLVM_DEBUG(dbgs() << "ARM Loops: Processing " << *ML);
+
+  auto IsLoopStart = [](MachineInstr &MI) {
+    return MI.getOpcode() == ARM::t2DoLoopStart;
+  };
+
+  auto IsLoopDec = [](MachineInstr &MI) {
+    return MI.getOpcode() == ARM::t2LoopDec;
+  };
+
+  auto IsLoopEnd = [](MachineInstr &MI) {
+    return MI.getOpcode() == ARM::t2LoopEnd;
+  };
+
+  auto SearchForStart =
+      [&IsLoopStart](MachineBasicBlock *MBB) -> MachineInstr* {
+    for (auto &MI : *MBB) {
+      if (IsLoopStart(MI))
+        return &MI;
+    }
+    return nullptr;
+  };
+
+  MachineInstr *Start = nullptr;
+  MachineInstr *Dec = nullptr;
+  MachineInstr *End = nullptr;
+  bool Revert = false;
+
+  if (auto *Preheader = ML->getLoopPreheader())
+    Start = SearchForStart(Preheader);
+
+  for (auto *MBB : ML->getBlocks()) {
+    for (auto &MI : *MBB) {
+      if (IsLoopDec(MI))
+        Dec = &MI;
+      else if (IsLoopEnd(MI))
+        End = &MI;
+
+      // If we find that we load/store LR between LoopDec and LoopEnd, revert
+      // back to a 'normal' loop.
+      if (Dec) {
+        if (MI.mayLoad() || MI.mayStore())
+          Revert =
+            MI.getOperand(0).isReg() && MI.getOperand(0).getReg() == ARM::LR;
+        if (MI.getDesc().isCall())
+          Revert = true;
+      }
+    }
+  }
+
+  if (Start || Dec || End)
+    assert((Start && Dec && End) && "Failed to find all loop components");
+  else {
+    LLVM_DEBUG(dbgs() << "ARM Loops: Not a low-overhead loop.\n");
+    return Changed;
+  }
+
+  assert((End->getOperand(1).isMBB() &&
+          End->getOperand(1).getMBB() == ML->getHeader()) &&
+         "Expected LoopEnd to target Loop Header");
+
+  // The LE instruction has 12 bits for the label offset.
+  if (!BBUtils->isBBInRange(End, ML->getHeader(), 4096)) {
+    LLVM_DEBUG(dbgs() << "ARM Loops: Too large for a low-overhead loop!\n");
+    Revert = true;
+  }
+
+  LLVM_DEBUG(dbgs() << "ARM Loops:\n - Found Loop Start: " << *Start
+                    << " - Found Loop Dec: " << *Dec
+                    << " - Found Loop End: " << *End);
+
+  Expand(ML, Start, Dec, End, Revert);
+  return true;
+}
+
+void ARMFinalizeLoops::Expand(MachineLoop *ML, MachineInstr *Start,
+                              MachineInstr *Dec, MachineInstr *End,
+                              bool Revert) {
+
+  auto ExpandLoopStart = [this](MachineLoop *ML, MachineInstr *Start) {
+    // The trip count should already be held in LR since the instructions
+    // within the loop can only read and write to LR. So, there should be a
+    // mov to set up the count. WLS/DLS perform this move, so find the
+    // original and delete it, inserting WLS/DLS in its place.
+    MachineBasicBlock *MBB = Start->getParent();
+    MachineInstr *InsertPt = nullptr;
+    for (auto &I : MRI->def_instructions(ARM::LR)) {
+      if (I.getParent() != MBB)
+        continue;
+
+      // Only consider instructions that always execute.
+      if (!I.getOperand(2).isImm() || I.getOperand(2).getImm() != ARMCC::AL)
+        continue;
+
+      // Only handle a register move; if the trip count is an immediate, it
+      // will need moving into a register before the setup instruction anyway.
+      if (!I.getDesc().isMoveReg() ||
+          !I.getOperand(1).isIdenticalTo(Start->getOperand(0)))
+        continue;
+      InsertPt = &I;
+      break;
+    }
+
+    MachineInstrBuilder MIB = InsertPt ?
+      BuildMI(*MBB, InsertPt, Start->getDebugLoc(), TII->get(ARM::t2DLS)) :
+      BuildMI(*MBB, Start, Start->getDebugLoc(), TII->get(ARM::t2DLS));
+    if (InsertPt)
+      InsertPt->eraseFromParent();
+
+    MIB.addDef(ARM::LR);
+    MIB.add(Start->getOperand(0));
+    LLVM_DEBUG(dbgs() << "ARM Loops: Inserted DLS: " << *MIB);
+    Start->eraseFromParent();
+  };
+
+  // Combine the LoopDec and LoopEnd instructions into LE(TP).
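+  // As a rough sketch (mirroring the cond-mov.mir test added below), a loop
+  // latch that currently ends in:
+  //   renamable $lr = t2LoopDec killed renamable $lr, 1
+  //   t2LoopEnd renamable $lr, %bb.header
+  //   t2B %bb.exit
+  // should end up using a single loop-end instruction:
+  //   $lr = t2LEUpdate renamable $lr, %bb.header
+  // with the trailing branch removed when the exit block is the layout
+  // successor.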
+  auto ExpandLoopEnd = [this](MachineLoop *ML, MachineInstr *Dec,
+                              MachineInstr *End) {
+    MachineBasicBlock *MBB = End->getParent();
+    MachineInstrBuilder MIB = BuildMI(*MBB, End, End->getDebugLoc(),
+                                      TII->get(ARM::t2LEUpdate));
+    MIB.addDef(ARM::LR);
+    MIB.add(End->getOperand(0));
+    MIB.add(End->getOperand(1));
+    LLVM_DEBUG(dbgs() << "ARM Loops: Inserted LE: " << *MIB);
+
+    // If there is a branch after the loop end which targets the fallthrough
+    // block, remove that branch.
+    MachineBasicBlock *Latch = End->getParent();
+    MachineInstr *Terminator = &Latch->instr_back();
+    if (End != Terminator) {
+      MachineBasicBlock *Exit = ML->getExitBlock();
+      if (Latch->isLayoutSuccessor(Exit)) {
+        LLVM_DEBUG(dbgs() << "ARM Loops: Removing loop exit branch: "
+                          << *Terminator);
+        Terminator->eraseFromParent();
+      }
+    }
+    End->eraseFromParent();
+    Dec->eraseFromParent();
+  };
+
+  // Generate a subs, or sub and cmp, and a branch instead of an LE.
+  // TODO: Check flags so that we can possibly generate a subs.
+  auto ExpandBranch = [this](MachineInstr *Dec, MachineInstr *End) {
+    LLVM_DEBUG(dbgs() << "ARM Loops: Reverting to sub, cmp, br.\n");
+    // Create sub
+    MachineBasicBlock *MBB = Dec->getParent();
+    MachineInstrBuilder MIB = BuildMI(*MBB, Dec, Dec->getDebugLoc(),
+                                      TII->get(ARM::t2SUBri));
+    MIB.addDef(ARM::LR);
+    MIB.add(Dec->getOperand(1));
+    MIB.add(Dec->getOperand(2));
+    MIB.addImm(ARMCC::AL);
+    MIB.addReg(0);
+    MIB.addReg(0);
+
+    // Create cmp
+    MBB = End->getParent();
+    MIB = BuildMI(*MBB, End, End->getDebugLoc(), TII->get(ARM::t2CMPri));
+    MIB.addReg(ARM::LR);
+    MIB.addImm(0);
+    MIB.addImm(ARMCC::AL);
+    MIB.addReg(0);                // predicate register
+
+    // Create bne
+    MIB = BuildMI(*MBB, End, End->getDebugLoc(), TII->get(ARM::t2Bcc));
+    MIB.add(End->getOperand(1));  // branch target
+    MIB.addImm(ARMCC::NE);        // condition code
+    MIB.addReg(ARM::CPSR);        // predicate register (flags)
+    End->eraseFromParent();
+    Dec->eraseFromParent();
+  };
+
+  if (Revert) {
+    Start->eraseFromParent();
+    ExpandBranch(Dec, End);
+  } else {
+    ExpandLoopStart(ML, Start);
+    ExpandLoopEnd(ML, Dec, End);
+  }
+}
+
+FunctionPass *llvm::createARMFinalizeLoopsPass() {
+  return new ARMFinalizeLoops();
+}
Index: lib/Target/ARM/ARMISelDAGToDAG.cpp
===================================================================
--- lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2985,6 +2985,39 @@
   unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
 
+  // Handle low-overhead loops.
+  if (InFlag.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN) {
+    if (InFlag.getOpcode() == ARMISD::CMPZ) {
+      // Handle loops.
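+      // Roughly, the glued pattern being rewritten here is a conditional
+      // branch on cmpz(llvm.loop.decrement.reg(elts, size), 0): the intrinsic
+      // becomes a t2LoopDec node and the branch becomes a t2LoopEnd node,
+      // both of which are finalized (or reverted) later by the ARM loop
+      // finalization pass.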
+      SDValue Int = InFlag.getOperand(0);
+      uint64_t ID =
+        cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
+
+      if (ID == Intrinsic::loop_decrement_reg) {
+        SDValue Elements = Int.getOperand(2);
+        SDValue Size = CurDAG->getTargetConstant(
+          cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl,
+          MVT::i32);
+
+        SDValue Args[] = { Elements, Size, Int.getOperand(0) };
+        SDNode *LoopDec =
+          CurDAG->getMachineNode(ARM::t2LoopDec, dl,
+                                 CurDAG->getVTList(MVT::i32, MVT::Other),
+                                 Args);
+        ReplaceUses(Int.getNode(), LoopDec);
+
+        SDValue EndArgs[] = { SDValue(LoopDec, 0), N1, Chain };
+        SDNode *LoopEnd =
+          CurDAG->getMachineNode(ARM::t2LoopEnd, dl, MVT::Other, EndArgs);
+
+        ReplaceUses(N, LoopEnd);
+        CurDAG->RemoveDeadNode(N);
+        CurDAG->RemoveDeadNode(InFlag.getNode());
+        CurDAG->RemoveDeadNode(Int.getNode());
+        return;
+      }
+    }
+  }
+
   if (InFlag.getOpcode() == ARMISD::CMPZ) {
     bool SwitchEQNEToPLMI;
     SelectCMPZ(InFlag.getNode(), SwitchEQNEToPLMI);
Index: lib/Target/ARM/ARMInstrThumb2.td
===================================================================
--- lib/Target/ARM/ARMInstrThumb2.td
+++ lib/Target/ARM/ARMInstrThumb2.td
@@ -5135,6 +5135,7 @@
   let Predicates = [IsThumb2, HasV8_1MMainline, HasLOB];
 }
 
+let isNotDuplicable = 1 in {
 def t2WLS : t2LOL<(outs GPRlr:$LR),
                   (ins rGPR:$Rn, wlslabel_u11:$label),
                   "wls", "$LR, $Rn, $label"> {
@@ -5178,6 +5179,20 @@
   let Inst{10-1} = label{10-1};
 }
 
+def t2DoLoopStart :
+  t2PseudoInst<(outs), (ins rGPR:$elts), 4, IIC_Br,
+               [(int_set_loop_iterations rGPR:$elts)]>, Sched<[WriteBr]>;
+
+def t2LoopDec :
+  t2PseudoInst<(outs GPRlr:$Rm), (ins GPRlr:$Rn, imm0_7:$size),
+               4, IIC_Br, []>, Sched<[WriteBr]>;
+
+let isBranch = 1, isTerminator = 1, hasSideEffects = 1 in
+def t2LoopEnd :
+  t2PseudoInst<(outs), (ins GPRlr:$elts, brtarget:$target),
+               4, IIC_Br, []>, Sched<[WriteBr]>;
+}
+
 class CS<string iname, bits<4> opcode, list<dag> pattern = []>
   : V8_1MI<(outs rGPR:$Rd), (ins GPRwithZR:$Rn, GPRwithZR:$Rm, pred_noal:$fcond),
            AddrModeNone, NoItinerary, iname, "$Rd, $Rn, $Rm, $fcond", "", pattern> {
Index: lib/Target/ARM/ARMTargetMachine.cpp
===================================================================
--- lib/Target/ARM/ARMTargetMachine.cpp
+++ lib/Target/ARM/ARMTargetMachine.cpp
@@ -96,6 +96,7 @@
   initializeARMExpandPseudoPass(Registry);
   initializeThumb2SizeReducePass(Registry);
   initializeMVEVPTBlockPass(Registry);
+  initializeARMFinalizeLoopsPass(Registry);
 }
 
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -446,6 +447,9 @@
                                MergeExternalByDefault));
   }
 
+  if (TM->getOptLevel() != CodeGenOpt::None)
+    addPass(createHardwareLoopsPass());
+
   return false;
 }
 
@@ -525,5 +529,6 @@
   if (getOptLevel() != CodeGenOpt::None)
     addPass(createARMOptimizeBarriersPass());
 
+  addPass(createARMFinalizeLoopsPass());
   addPass(createARMConstantIslandPass());
 }
Index: lib/Target/ARM/CMakeLists.txt
===================================================================
--- lib/Target/ARM/CMakeLists.txt
+++ lib/Target/ARM/CMakeLists.txt
@@ -30,6 +30,7 @@
   ARMConstantPoolValue.cpp
   ARMExpandPseudoInsts.cpp
   ARMFastISel.cpp
+  ARMFinalizeLoops.cpp
   ARMFrameLowering.cpp
   ARMHazardRecognizer.cpp
   ARMInstructionSelector.cpp
Index: test/CodeGen/ARM/O3-pipeline.ll
===================================================================
--- test/CodeGen/ARM/O3-pipeline.ll
+++ test/CodeGen/ARM/O3-pipeline.ll
@@ -49,6 +49,10 @@
 ; CHECK-NEXT: Dominator Tree Construction
 ; CHECK-NEXT: Exception handling preparation
 ; CHECK-NEXT: Merge internal globals
+; CHECK-NEXT: Dominator Tree Construction
+; CHECK-NEXT: Natural Loop Information
+; CHECK-NEXT: Scalar Evolution Analysis
+; CHECK-NEXT: Hardware Loop Insertion
 ; CHECK-NEXT: Safe Stack instrumentation pass
 ; CHECK-NEXT: Insert stack protectors
 ; CHECK-NEXT: Module Verifier
@@ -137,6 +141,9 @@
 ; CHECK-NEXT: Thumb2 instruction size reduce pass
 ; CHECK-NEXT: Unpack machine instruction bundles
 ; CHECK-NEXT: optimise barriers pass
+; CHECK-NEXT: MachineDominator Tree Construction
+; CHECK-NEXT: Machine Natural Loop Construction
+; CHECK-NEXT: ARM loop finalization pass
 ; CHECK-NEXT: ARM constant island placement and branch shortening pass
 ; CHECK-NEXT: Contiguously Lay Out Funclets
 ; CHECK-NEXT: StackMap Liveness Analysis
Index: test/Transforms/HardwareLoops/ARM/calls.ll
===================================================================
--- test/Transforms/HardwareLoops/ARM/calls.ll
+++ test/Transforms/HardwareLoops/ARM/calls.ll
@@ -3,7 +3,7 @@
 ; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+fp-armv8,+fullfp16 -hardware-loops -disable-arm-loloops=false %s -S -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FP64
 ; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -hardware-loops -disable-arm-loloops=false %s -S -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVE
 ; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -hardware-loops -disable-arm-loloops=false %s -S -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-MVEFP
-
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -disable-arm-loloops=false %s -o - | FileCheck %s --check-prefix=CHECK-LLC
 
 ; CHECK-LABEL: skip_call
 ; CHECK-NOT: call void @llvm.set.loop.iterations
@@ -41,6 +41,15 @@
 ; CHECK: [[CMP:%[^ ]+]] = icmp ne i32 [[LOOP_DEC]], 0
 ; CHECK: br i1 [[CMP]], label %loop, label %exit
 
+; CHECK-LLC-LABEL: test_target_specific:
+; CHECK-LLC: mov.w lr, #50
+; CHECK-LLC: dls lr, lr
+; CHECK-LLC-NOT: mov lr,
+; CHECK-LLC: [[LOOP_HEADER:\.LBB[0-9_]+]]:
+; CHECK-LLC: le lr, [[LOOP_HEADER]]
+; CHECK-LLC-NOT: b .
+; CHECK-LLC: @ %exit
+
 define i32 @test_target_specific(i32* %a, i32* %b) {
 entry:
   br label %loop
@@ -86,6 +95,17 @@
 ; CHECK-MVE-NOT: call void @llvm.set.loop.iterations
 ; CHECK-FP: call void @llvm.set.loop.iterations.i32(i32 100)
 ; CHECK-MVEFP: call void @llvm.set.loop.iterations.i32(i32 100)
+
+; CHECK-LLC-LABEL: test_fabs:
+; CHECK-LLC: mov.w lr, #100
+; CHECK-LLC: dls lr, lr
+; CHECK-LLC-NOT: mov lr,
+; CHECK-LLC: [[LOOP_HEADER:\.LBB[0-9_]+]]:
+; CHECK-LLC-NOT: bl
+; CHECK-LLC: le lr, [[LOOP_HEADER]]
+; CHECK-LLC-NOT: b .
+; CHECK-LLC: @ %exit + define float @test_fabs(float* %a) { entry: br label %loop Index: test/Transforms/HardwareLoops/ARM/cond-mov.mir =================================================================== --- /dev/null +++ test/Transforms/HardwareLoops/ARM/cond-mov.mir @@ -0,0 +1,115 @@ +# RUN: llc -mtriple=thumbv8.1m.main -run-pass=arm-finalize-loops %s -o - | FileCheck %s +# CHECK: $lr = tMOVr $r0, 13, $noreg +# CHECK: $lr = t2DLS killed $r0 +# CHECK: $lr = t2LEUpdate renamable $lr, %bb.1 + +--- | + target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + target triple = "thumbv8.1m.main" + + define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) { + entry: + %scevgep = getelementptr i32, i32* %q, i32 -1 + %scevgep3 = getelementptr i32, i32* %p, i32 -1 + call void @llvm.set.loop.iterations.i32(i32 %n) + br label %while.body + + while.body: + %lsr.iv4 = phi i32* [ %scevgep5, %while.body ], [ %scevgep3, %entry ] + %lsr.iv = phi i32* [ %scevgep1, %while.body ], [ %scevgep, %entry ] + %0 = phi i32 [ %n, %entry ], [ %2, %while.body ] + %scevgep2 = getelementptr i32, i32* %lsr.iv, i32 1 + %scevgep6 = getelementptr i32, i32* %lsr.iv4, i32 1 + %1 = load i32, i32* %scevgep2, align 4 + store i32 %1, i32* %scevgep6, align 4 + %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1 + %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1 + %2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %3 = icmp ne i32 %2, 0 + br i1 %3, label %while.body, label %while.end + + while.end: + ret i32 0 + } + + declare void @llvm.set.loop.iterations.i32(i32) #0 + declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0 + declare void @llvm.stackprotector(i8*, i8**) #1 + + attributes #0 = { noduplicate nounwind } + attributes #1 = { nounwind } + +... 
+--- +name: do_copy +alignment: 1 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } + - { reg: '$r2', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 8 + offsetAdjustment: 0 + maxAlignment: 4 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: [] +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1(0x80000000) + liveins: $r0, $r1, $r2, $r7, $lr + + $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r7, killed $lr + frame-setup CFI_INSTRUCTION def_cfa_offset 8 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + $lr = tMOVr $r0, 13, $noreg + t2DoLoopStart killed $r0 + renamable $r0 = t2SUBri killed renamable $r1, 4, 14, $noreg, $noreg + renamable $r1 = t2SUBri killed renamable $r2, 4, 14, $noreg, $noreg + + bb.1.while.body: + successors: %bb.1(0x7c000000), %bb.2(0x04000000) + liveins: $lr, $r0, $r1 + + renamable $r2, renamable $r1 = t2LDR_PRE killed renamable $r1, 4, 14, $noreg :: (load 4 from %ir.scevgep2) + early-clobber renamable $r0 = t2STR_PRE killed renamable $r2, killed renamable $r0, 4, 14, $noreg :: (store 4 into %ir.scevgep6) + renamable $lr = t2LoopDec killed renamable $lr, 1 + t2LoopEnd renamable $lr, %bb.1 + t2B %bb.2, 14, $noreg + + bb.2.while.end: + $r0 = t2MOVi 0, 14, $noreg, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r7, def $pc, implicit killed $r0 + +... 
Index: test/Transforms/HardwareLoops/ARM/massive.mir =================================================================== --- /dev/null +++ test/Transforms/HardwareLoops/ARM/massive.mir @@ -0,0 +1,3438 @@ +# RUN: llc -mtriple=armv8.1m.main -run-pass=arm-finalize-loops %s -o - | FileCheck %s +# CHECK: body: +# CHECK-NOT: dls lr +# CHECK-NOT: le lr + +--- | + target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + target triple = "armv8.1m.main" + + define dso_local arm_aapcscc void @massive(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c) local_unnamed_addr { + entry: + call void @llvm.set.loop.iterations.i32(i32 4) + br label %for.body + + for.cond.cleanup: ; preds = %for.body + ret void + + for.body: ; preds = %for.body, %entry + %i.08 = phi i32 [ 0, %entry ], [ %inc.255, %for.body ] + %count = phi i32 [ 4, %entry ], [ %loop.dec, %for.body ] + %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.08 + %0 = load i32, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32, i32* %c, i32 %i.08 + %1 = load i32, i32* %arrayidx1, align 4 + %mul = mul nsw i32 %1, %0 + %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 %i.08 + store i32 %mul, i32* %arrayidx2, align 4 + %inc = or i32 %i.08, 1 + %arrayidx.1 = getelementptr inbounds i32, i32* %b, i32 %inc + %2 = load i32, i32* %arrayidx.1, align 4 + %arrayidx1.1 = getelementptr inbounds i32, i32* %c, i32 %inc + %3 = load i32, i32* %arrayidx1.1, align 4 + %mul.1 = mul nsw i32 %3, %2 + %arrayidx2.1 = getelementptr inbounds i32, i32* %a, i32 %inc + store i32 %mul.1, i32* %arrayidx2.1, align 4 + %inc.1 = or i32 %i.08, 2 + %arrayidx.2 = getelementptr inbounds i32, i32* %b, i32 %inc.1 + %4 = load i32, i32* %arrayidx.2, align 4 + %arrayidx1.2 = getelementptr inbounds i32, i32* %c, i32 %inc.1 + %5 = load i32, i32* %arrayidx1.2, align 4 + %mul.2 = mul nsw i32 %5, %4 + %arrayidx2.2 = getelementptr inbounds i32, i32* %a, i32 %inc.1 + store i32 %mul.2, i32* %arrayidx2.2, align 4 + %inc.2 = or i32 %i.08, 3 + %arrayidx.3 = getelementptr inbounds i32, i32* %b, i32 %inc.2 + %6 = load i32, i32* %arrayidx.3, align 4 + %arrayidx1.3 = getelementptr inbounds i32, i32* %c, i32 %inc.2 + %7 = load i32, i32* %arrayidx1.3, align 4 + %mul.3 = mul nsw i32 %7, %6 + %arrayidx2.3 = getelementptr inbounds i32, i32* %a, i32 %inc.2 + store i32 %mul.3, i32* %arrayidx2.3, align 4 + %inc.3 = or i32 %i.08, 4 + %arrayidx.4 = getelementptr inbounds i32, i32* %b, i32 %inc.3 + %8 = load i32, i32* %arrayidx.4, align 4 + %arrayidx1.4 = getelementptr inbounds i32, i32* %c, i32 %inc.3 + %9 = load i32, i32* %arrayidx1.4, align 4 + %mul.4 = mul nsw i32 %9, %8 + %arrayidx2.4 = getelementptr inbounds i32, i32* %a, i32 %inc.3 + store i32 %mul.4, i32* %arrayidx2.4, align 4 + %inc.4 = or i32 %i.08, 5 + %arrayidx.5 = getelementptr inbounds i32, i32* %b, i32 %inc.4 + %10 = load i32, i32* %arrayidx.5, align 4 + %arrayidx1.5 = getelementptr inbounds i32, i32* %c, i32 %inc.4 + %11 = load i32, i32* %arrayidx1.5, align 4 + %mul.5 = mul nsw i32 %11, %10 + %arrayidx2.5 = getelementptr inbounds i32, i32* %a, i32 %inc.4 + store i32 %mul.5, i32* %arrayidx2.5, align 4 + %inc.5 = or i32 %i.08, 6 + %arrayidx.6 = getelementptr inbounds i32, i32* %b, i32 %inc.5 + %12 = load i32, i32* %arrayidx.6, align 4 + %arrayidx1.6 = getelementptr inbounds i32, i32* %c, i32 %inc.5 + %13 = load i32, i32* %arrayidx1.6, align 4 + %mul.6 = mul nsw i32 %13, %12 + %arrayidx2.6 = getelementptr inbounds i32, i32* %a, i32 %inc.5 + store i32 %mul.6, i32* %arrayidx2.6, align 4 + 
%inc.6 = or i32 %i.08, 7 + %arrayidx.7 = getelementptr inbounds i32, i32* %b, i32 %inc.6 + %14 = load i32, i32* %arrayidx.7, align 4 + %arrayidx1.7 = getelementptr inbounds i32, i32* %c, i32 %inc.6 + %15 = load i32, i32* %arrayidx1.7, align 4 + %mul.7 = mul nsw i32 %15, %14 + %arrayidx2.7 = getelementptr inbounds i32, i32* %a, i32 %inc.6 + store i32 %mul.7, i32* %arrayidx2.7, align 4 + %inc.7 = or i32 %i.08, 8 + %arrayidx.8 = getelementptr inbounds i32, i32* %b, i32 %inc.7 + %16 = load i32, i32* %arrayidx.8, align 4 + %arrayidx1.8 = getelementptr inbounds i32, i32* %c, i32 %inc.7 + %17 = load i32, i32* %arrayidx1.8, align 4 + %mul.8 = mul nsw i32 %17, %16 + %arrayidx2.8 = getelementptr inbounds i32, i32* %a, i32 %inc.7 + store i32 %mul.8, i32* %arrayidx2.8, align 4 + %inc.8 = or i32 %i.08, 9 + %arrayidx.9 = getelementptr inbounds i32, i32* %b, i32 %inc.8 + %18 = load i32, i32* %arrayidx.9, align 4 + %arrayidx1.9 = getelementptr inbounds i32, i32* %c, i32 %inc.8 + %19 = load i32, i32* %arrayidx1.9, align 4 + %mul.9 = mul nsw i32 %19, %18 + %arrayidx2.9 = getelementptr inbounds i32, i32* %a, i32 %inc.8 + store i32 %mul.9, i32* %arrayidx2.9, align 4 + %inc.9 = or i32 %i.08, 10 + %arrayidx.10 = getelementptr inbounds i32, i32* %b, i32 %inc.9 + %20 = load i32, i32* %arrayidx.10, align 4 + %arrayidx1.10 = getelementptr inbounds i32, i32* %c, i32 %inc.9 + %21 = load i32, i32* %arrayidx1.10, align 4 + %mul.10 = mul nsw i32 %21, %20 + %arrayidx2.10 = getelementptr inbounds i32, i32* %a, i32 %inc.9 + store i32 %mul.10, i32* %arrayidx2.10, align 4 + %inc.10 = or i32 %i.08, 11 + %arrayidx.11 = getelementptr inbounds i32, i32* %b, i32 %inc.10 + %22 = load i32, i32* %arrayidx.11, align 4 + %arrayidx1.11 = getelementptr inbounds i32, i32* %c, i32 %inc.10 + %23 = load i32, i32* %arrayidx1.11, align 4 + %mul.11 = mul nsw i32 %23, %22 + %arrayidx2.11 = getelementptr inbounds i32, i32* %a, i32 %inc.10 + store i32 %mul.11, i32* %arrayidx2.11, align 4 + %inc.11 = or i32 %i.08, 12 + %arrayidx.12 = getelementptr inbounds i32, i32* %b, i32 %inc.11 + %24 = load i32, i32* %arrayidx.12, align 4 + %arrayidx1.12 = getelementptr inbounds i32, i32* %c, i32 %inc.11 + %25 = load i32, i32* %arrayidx1.12, align 4 + %mul.12 = mul nsw i32 %25, %24 + %arrayidx2.12 = getelementptr inbounds i32, i32* %a, i32 %inc.11 + store i32 %mul.12, i32* %arrayidx2.12, align 4 + %inc.12 = or i32 %i.08, 13 + %arrayidx.13 = getelementptr inbounds i32, i32* %b, i32 %inc.12 + %26 = load i32, i32* %arrayidx.13, align 4 + %arrayidx1.13 = getelementptr inbounds i32, i32* %c, i32 %inc.12 + %27 = load i32, i32* %arrayidx1.13, align 4 + %mul.13 = mul nsw i32 %27, %26 + %arrayidx2.13 = getelementptr inbounds i32, i32* %a, i32 %inc.12 + store i32 %mul.13, i32* %arrayidx2.13, align 4 + %inc.13 = or i32 %i.08, 14 + %arrayidx.14 = getelementptr inbounds i32, i32* %b, i32 %inc.13 + %28 = load i32, i32* %arrayidx.14, align 4 + %arrayidx1.14 = getelementptr inbounds i32, i32* %c, i32 %inc.13 + %29 = load i32, i32* %arrayidx1.14, align 4 + %mul.14 = mul nsw i32 %29, %28 + %arrayidx2.14 = getelementptr inbounds i32, i32* %a, i32 %inc.13 + store i32 %mul.14, i32* %arrayidx2.14, align 4 + %inc.14 = or i32 %i.08, 15 + %arrayidx.15 = getelementptr inbounds i32, i32* %b, i32 %inc.14 + %30 = load i32, i32* %arrayidx.15, align 4 + %arrayidx1.15 = getelementptr inbounds i32, i32* %c, i32 %inc.14 + %31 = load i32, i32* %arrayidx1.15, align 4 + %mul.15 = mul nsw i32 %31, %30 + %arrayidx2.15 = getelementptr inbounds i32, i32* %a, i32 %inc.14 + store i32 %mul.15, i32* 
%arrayidx2.15, align 4 + %inc.15 = or i32 %i.08, 16 + %arrayidx.16 = getelementptr inbounds i32, i32* %b, i32 %inc.15 + %32 = load i32, i32* %arrayidx.16, align 4 + %arrayidx1.16 = getelementptr inbounds i32, i32* %c, i32 %inc.15 + %33 = load i32, i32* %arrayidx1.16, align 4 + %mul.16 = mul nsw i32 %33, %32 + %arrayidx2.16 = getelementptr inbounds i32, i32* %a, i32 %inc.15 + store i32 %mul.16, i32* %arrayidx2.16, align 4 + %inc.16 = or i32 %i.08, 17 + %arrayidx.17 = getelementptr inbounds i32, i32* %b, i32 %inc.16 + %34 = load i32, i32* %arrayidx.17, align 4 + %arrayidx1.17 = getelementptr inbounds i32, i32* %c, i32 %inc.16 + %35 = load i32, i32* %arrayidx1.17, align 4 + %mul.17 = mul nsw i32 %35, %34 + %arrayidx2.17 = getelementptr inbounds i32, i32* %a, i32 %inc.16 + store i32 %mul.17, i32* %arrayidx2.17, align 4 + %inc.17 = or i32 %i.08, 18 + %arrayidx.18 = getelementptr inbounds i32, i32* %b, i32 %inc.17 + %36 = load i32, i32* %arrayidx.18, align 4 + %arrayidx1.18 = getelementptr inbounds i32, i32* %c, i32 %inc.17 + %37 = load i32, i32* %arrayidx1.18, align 4 + %mul.18 = mul nsw i32 %37, %36 + %arrayidx2.18 = getelementptr inbounds i32, i32* %a, i32 %inc.17 + store i32 %mul.18, i32* %arrayidx2.18, align 4 + %inc.18 = or i32 %i.08, 19 + %arrayidx.19 = getelementptr inbounds i32, i32* %b, i32 %inc.18 + %38 = load i32, i32* %arrayidx.19, align 4 + %arrayidx1.19 = getelementptr inbounds i32, i32* %c, i32 %inc.18 + %39 = load i32, i32* %arrayidx1.19, align 4 + %mul.19 = mul nsw i32 %39, %38 + %arrayidx2.19 = getelementptr inbounds i32, i32* %a, i32 %inc.18 + store i32 %mul.19, i32* %arrayidx2.19, align 4 + %inc.19 = or i32 %i.08, 20 + %arrayidx.20 = getelementptr inbounds i32, i32* %b, i32 %inc.19 + %40 = load i32, i32* %arrayidx.20, align 4 + %arrayidx1.20 = getelementptr inbounds i32, i32* %c, i32 %inc.19 + %41 = load i32, i32* %arrayidx1.20, align 4 + %mul.20 = mul nsw i32 %41, %40 + %arrayidx2.20 = getelementptr inbounds i32, i32* %a, i32 %inc.19 + store i32 %mul.20, i32* %arrayidx2.20, align 4 + %inc.20 = or i32 %i.08, 21 + %arrayidx.21 = getelementptr inbounds i32, i32* %b, i32 %inc.20 + %42 = load i32, i32* %arrayidx.21, align 4 + %arrayidx1.21 = getelementptr inbounds i32, i32* %c, i32 %inc.20 + %43 = load i32, i32* %arrayidx1.21, align 4 + %mul.21 = mul nsw i32 %43, %42 + %arrayidx2.21 = getelementptr inbounds i32, i32* %a, i32 %inc.20 + store i32 %mul.21, i32* %arrayidx2.21, align 4 + %inc.21 = or i32 %i.08, 22 + %arrayidx.22 = getelementptr inbounds i32, i32* %b, i32 %inc.21 + %44 = load i32, i32* %arrayidx.22, align 4 + %arrayidx1.22 = getelementptr inbounds i32, i32* %c, i32 %inc.21 + %45 = load i32, i32* %arrayidx1.22, align 4 + %mul.22 = mul nsw i32 %45, %44 + %arrayidx2.22 = getelementptr inbounds i32, i32* %a, i32 %inc.21 + store i32 %mul.22, i32* %arrayidx2.22, align 4 + %inc.22 = or i32 %i.08, 23 + %arrayidx.23 = getelementptr inbounds i32, i32* %b, i32 %inc.22 + %46 = load i32, i32* %arrayidx.23, align 4 + %arrayidx1.23 = getelementptr inbounds i32, i32* %c, i32 %inc.22 + %47 = load i32, i32* %arrayidx1.23, align 4 + %mul.23 = mul nsw i32 %47, %46 + %arrayidx2.23 = getelementptr inbounds i32, i32* %a, i32 %inc.22 + store i32 %mul.23, i32* %arrayidx2.23, align 4 + %inc.23 = or i32 %i.08, 24 + %arrayidx.24 = getelementptr inbounds i32, i32* %b, i32 %inc.23 + %48 = load i32, i32* %arrayidx.24, align 4 + %arrayidx1.24 = getelementptr inbounds i32, i32* %c, i32 %inc.23 + %49 = load i32, i32* %arrayidx1.24, align 4 + %mul.24 = mul nsw i32 %49, %48 + %arrayidx2.24 = 
getelementptr inbounds i32, i32* %a, i32 %inc.23 + store i32 %mul.24, i32* %arrayidx2.24, align 4 + %inc.24 = or i32 %i.08, 25 + %arrayidx.25 = getelementptr inbounds i32, i32* %b, i32 %inc.24 + %50 = load i32, i32* %arrayidx.25, align 4 + %arrayidx1.25 = getelementptr inbounds i32, i32* %c, i32 %inc.24 + %51 = load i32, i32* %arrayidx1.25, align 4 + %mul.25 = mul nsw i32 %51, %50 + %arrayidx2.25 = getelementptr inbounds i32, i32* %a, i32 %inc.24 + store i32 %mul.25, i32* %arrayidx2.25, align 4 + %inc.25 = or i32 %i.08, 26 + %arrayidx.26 = getelementptr inbounds i32, i32* %b, i32 %inc.25 + %52 = load i32, i32* %arrayidx.26, align 4 + %arrayidx1.26 = getelementptr inbounds i32, i32* %c, i32 %inc.25 + %53 = load i32, i32* %arrayidx1.26, align 4 + %mul.26 = mul nsw i32 %53, %52 + %arrayidx2.26 = getelementptr inbounds i32, i32* %a, i32 %inc.25 + store i32 %mul.26, i32* %arrayidx2.26, align 4 + %inc.26 = or i32 %i.08, 27 + %arrayidx.27 = getelementptr inbounds i32, i32* %b, i32 %inc.26 + %54 = load i32, i32* %arrayidx.27, align 4 + %arrayidx1.27 = getelementptr inbounds i32, i32* %c, i32 %inc.26 + %55 = load i32, i32* %arrayidx1.27, align 4 + %mul.27 = mul nsw i32 %55, %54 + %arrayidx2.27 = getelementptr inbounds i32, i32* %a, i32 %inc.26 + store i32 %mul.27, i32* %arrayidx2.27, align 4 + %inc.27 = or i32 %i.08, 28 + %arrayidx.28 = getelementptr inbounds i32, i32* %b, i32 %inc.27 + %56 = load i32, i32* %arrayidx.28, align 4 + %arrayidx1.28 = getelementptr inbounds i32, i32* %c, i32 %inc.27 + %57 = load i32, i32* %arrayidx1.28, align 4 + %mul.28 = mul nsw i32 %57, %56 + %arrayidx2.28 = getelementptr inbounds i32, i32* %a, i32 %inc.27 + store i32 %mul.28, i32* %arrayidx2.28, align 4 + %inc.28 = or i32 %i.08, 29 + %arrayidx.29 = getelementptr inbounds i32, i32* %b, i32 %inc.28 + %58 = load i32, i32* %arrayidx.29, align 4 + %arrayidx1.29 = getelementptr inbounds i32, i32* %c, i32 %inc.28 + %59 = load i32, i32* %arrayidx1.29, align 4 + %mul.29 = mul nsw i32 %59, %58 + %arrayidx2.29 = getelementptr inbounds i32, i32* %a, i32 %inc.28 + store i32 %mul.29, i32* %arrayidx2.29, align 4 + %inc.29 = or i32 %i.08, 30 + %arrayidx.30 = getelementptr inbounds i32, i32* %b, i32 %inc.29 + %60 = load i32, i32* %arrayidx.30, align 4 + %arrayidx1.30 = getelementptr inbounds i32, i32* %c, i32 %inc.29 + %61 = load i32, i32* %arrayidx1.30, align 4 + %mul.30 = mul nsw i32 %61, %60 + %arrayidx2.30 = getelementptr inbounds i32, i32* %a, i32 %inc.29 + store i32 %mul.30, i32* %arrayidx2.30, align 4 + %inc.30 = or i32 %i.08, 31 + %arrayidx.31 = getelementptr inbounds i32, i32* %b, i32 %inc.30 + %62 = load i32, i32* %arrayidx.31, align 4 + %arrayidx1.31 = getelementptr inbounds i32, i32* %c, i32 %inc.30 + %63 = load i32, i32* %arrayidx1.31, align 4 + %mul.31 = mul nsw i32 %63, %62 + %arrayidx2.31 = getelementptr inbounds i32, i32* %a, i32 %inc.30 + store i32 %mul.31, i32* %arrayidx2.31, align 4 + %inc.31 = or i32 %i.08, 32 + %arrayidx.32 = getelementptr inbounds i32, i32* %b, i32 %inc.31 + %64 = load i32, i32* %arrayidx.32, align 4 + %arrayidx1.32 = getelementptr inbounds i32, i32* %c, i32 %inc.31 + %65 = load i32, i32* %arrayidx1.32, align 4 + %mul.32 = mul nsw i32 %65, %64 + %arrayidx2.32 = getelementptr inbounds i32, i32* %a, i32 %inc.31 + store i32 %mul.32, i32* %arrayidx2.32, align 4 + %inc.32 = or i32 %i.08, 33 + %arrayidx.33 = getelementptr inbounds i32, i32* %b, i32 %inc.32 + %66 = load i32, i32* %arrayidx.33, align 4 + %arrayidx1.33 = getelementptr inbounds i32, i32* %c, i32 %inc.32 + %67 = load i32, i32* 
%arrayidx1.33, align 4 + %mul.33 = mul nsw i32 %67, %66 + %arrayidx2.33 = getelementptr inbounds i32, i32* %a, i32 %inc.32 + store i32 %mul.33, i32* %arrayidx2.33, align 4 + %inc.33 = or i32 %i.08, 34 + %arrayidx.34 = getelementptr inbounds i32, i32* %b, i32 %inc.33 + %68 = load i32, i32* %arrayidx.34, align 4 + %arrayidx1.34 = getelementptr inbounds i32, i32* %c, i32 %inc.33 + %69 = load i32, i32* %arrayidx1.34, align 4 + %mul.34 = mul nsw i32 %69, %68 + %arrayidx2.34 = getelementptr inbounds i32, i32* %a, i32 %inc.33 + store i32 %mul.34, i32* %arrayidx2.34, align 4 + %inc.34 = or i32 %i.08, 35 + %arrayidx.35 = getelementptr inbounds i32, i32* %b, i32 %inc.34 + %70 = load i32, i32* %arrayidx.35, align 4 + %arrayidx1.35 = getelementptr inbounds i32, i32* %c, i32 %inc.34 + %71 = load i32, i32* %arrayidx1.35, align 4 + %mul.35 = mul nsw i32 %71, %70 + %arrayidx2.35 = getelementptr inbounds i32, i32* %a, i32 %inc.34 + store i32 %mul.35, i32* %arrayidx2.35, align 4 + %inc.35 = or i32 %i.08, 36 + %arrayidx.36 = getelementptr inbounds i32, i32* %b, i32 %inc.35 + %72 = load i32, i32* %arrayidx.36, align 4 + %arrayidx1.36 = getelementptr inbounds i32, i32* %c, i32 %inc.35 + %73 = load i32, i32* %arrayidx1.36, align 4 + %mul.36 = mul nsw i32 %73, %72 + %arrayidx2.36 = getelementptr inbounds i32, i32* %a, i32 %inc.35 + store i32 %mul.36, i32* %arrayidx2.36, align 4 + %inc.36 = or i32 %i.08, 37 + %arrayidx.37 = getelementptr inbounds i32, i32* %b, i32 %inc.36 + %74 = load i32, i32* %arrayidx.37, align 4 + %arrayidx1.37 = getelementptr inbounds i32, i32* %c, i32 %inc.36 + %75 = load i32, i32* %arrayidx1.37, align 4 + %mul.37 = mul nsw i32 %75, %74 + %arrayidx2.37 = getelementptr inbounds i32, i32* %a, i32 %inc.36 + store i32 %mul.37, i32* %arrayidx2.37, align 4 + %inc.37 = or i32 %i.08, 38 + %arrayidx.38 = getelementptr inbounds i32, i32* %b, i32 %inc.37 + %76 = load i32, i32* %arrayidx.38, align 4 + %arrayidx1.38 = getelementptr inbounds i32, i32* %c, i32 %inc.37 + %77 = load i32, i32* %arrayidx1.38, align 4 + %mul.38 = mul nsw i32 %77, %76 + %arrayidx2.38 = getelementptr inbounds i32, i32* %a, i32 %inc.37 + store i32 %mul.38, i32* %arrayidx2.38, align 4 + %inc.38 = or i32 %i.08, 39 + %arrayidx.39 = getelementptr inbounds i32, i32* %b, i32 %inc.38 + %78 = load i32, i32* %arrayidx.39, align 4 + %arrayidx1.39 = getelementptr inbounds i32, i32* %c, i32 %inc.38 + %79 = load i32, i32* %arrayidx1.39, align 4 + %mul.39 = mul nsw i32 %79, %78 + %arrayidx2.39 = getelementptr inbounds i32, i32* %a, i32 %inc.38 + store i32 %mul.39, i32* %arrayidx2.39, align 4 + %inc.39 = or i32 %i.08, 40 + %arrayidx.40 = getelementptr inbounds i32, i32* %b, i32 %inc.39 + %80 = load i32, i32* %arrayidx.40, align 4 + %arrayidx1.40 = getelementptr inbounds i32, i32* %c, i32 %inc.39 + %81 = load i32, i32* %arrayidx1.40, align 4 + %mul.40 = mul nsw i32 %81, %80 + %arrayidx2.40 = getelementptr inbounds i32, i32* %a, i32 %inc.39 + store i32 %mul.40, i32* %arrayidx2.40, align 4 + %inc.40 = or i32 %i.08, 41 + %arrayidx.41 = getelementptr inbounds i32, i32* %b, i32 %inc.40 + %82 = load i32, i32* %arrayidx.41, align 4 + %arrayidx1.41 = getelementptr inbounds i32, i32* %c, i32 %inc.40 + %83 = load i32, i32* %arrayidx1.41, align 4 + %mul.41 = mul nsw i32 %83, %82 + %arrayidx2.41 = getelementptr inbounds i32, i32* %a, i32 %inc.40 + store i32 %mul.41, i32* %arrayidx2.41, align 4 + %inc.41 = or i32 %i.08, 42 + %arrayidx.42 = getelementptr inbounds i32, i32* %b, i32 %inc.41 + %84 = load i32, i32* %arrayidx.42, align 4 + %arrayidx1.42 = 
getelementptr inbounds i32, i32* %c, i32 %inc.41 + %85 = load i32, i32* %arrayidx1.42, align 4 + %mul.42 = mul nsw i32 %85, %84 + %arrayidx2.42 = getelementptr inbounds i32, i32* %a, i32 %inc.41 + store i32 %mul.42, i32* %arrayidx2.42, align 4 + %inc.42 = or i32 %i.08, 43 + %arrayidx.43 = getelementptr inbounds i32, i32* %b, i32 %inc.42 + %86 = load i32, i32* %arrayidx.43, align 4 + %arrayidx1.43 = getelementptr inbounds i32, i32* %c, i32 %inc.42 + %87 = load i32, i32* %arrayidx1.43, align 4 + %mul.43 = mul nsw i32 %87, %86 + %arrayidx2.43 = getelementptr inbounds i32, i32* %a, i32 %inc.42 + store i32 %mul.43, i32* %arrayidx2.43, align 4 + %inc.43 = or i32 %i.08, 44 + %arrayidx.44 = getelementptr inbounds i32, i32* %b, i32 %inc.43 + %88 = load i32, i32* %arrayidx.44, align 4 + %arrayidx1.44 = getelementptr inbounds i32, i32* %c, i32 %inc.43 + %89 = load i32, i32* %arrayidx1.44, align 4 + %mul.44 = mul nsw i32 %89, %88 + %arrayidx2.44 = getelementptr inbounds i32, i32* %a, i32 %inc.43 + store i32 %mul.44, i32* %arrayidx2.44, align 4 + %inc.44 = or i32 %i.08, 45 + %arrayidx.45 = getelementptr inbounds i32, i32* %b, i32 %inc.44 + %90 = load i32, i32* %arrayidx.45, align 4 + %arrayidx1.45 = getelementptr inbounds i32, i32* %c, i32 %inc.44 + %91 = load i32, i32* %arrayidx1.45, align 4 + %mul.45 = mul nsw i32 %91, %90 + %arrayidx2.45 = getelementptr inbounds i32, i32* %a, i32 %inc.44 + store i32 %mul.45, i32* %arrayidx2.45, align 4 + %inc.45 = or i32 %i.08, 46 + %arrayidx.46 = getelementptr inbounds i32, i32* %b, i32 %inc.45 + %92 = load i32, i32* %arrayidx.46, align 4 + %arrayidx1.46 = getelementptr inbounds i32, i32* %c, i32 %inc.45 + %93 = load i32, i32* %arrayidx1.46, align 4 + %mul.46 = mul nsw i32 %93, %92 + %arrayidx2.46 = getelementptr inbounds i32, i32* %a, i32 %inc.45 + store i32 %mul.46, i32* %arrayidx2.46, align 4 + %inc.46 = or i32 %i.08, 47 + %arrayidx.47 = getelementptr inbounds i32, i32* %b, i32 %inc.46 + %94 = load i32, i32* %arrayidx.47, align 4 + %arrayidx1.47 = getelementptr inbounds i32, i32* %c, i32 %inc.46 + %95 = load i32, i32* %arrayidx1.47, align 4 + %mul.47 = mul nsw i32 %95, %94 + %arrayidx2.47 = getelementptr inbounds i32, i32* %a, i32 %inc.46 + store i32 %mul.47, i32* %arrayidx2.47, align 4 + %inc.47 = or i32 %i.08, 48 + %arrayidx.48 = getelementptr inbounds i32, i32* %b, i32 %inc.47 + %96 = load i32, i32* %arrayidx.48, align 4 + %arrayidx1.48 = getelementptr inbounds i32, i32* %c, i32 %inc.47 + %97 = load i32, i32* %arrayidx1.48, align 4 + %mul.48 = mul nsw i32 %97, %96 + %arrayidx2.48 = getelementptr inbounds i32, i32* %a, i32 %inc.47 + store i32 %mul.48, i32* %arrayidx2.48, align 4 + %inc.48 = or i32 %i.08, 49 + %arrayidx.49 = getelementptr inbounds i32, i32* %b, i32 %inc.48 + %98 = load i32, i32* %arrayidx.49, align 4 + %arrayidx1.49 = getelementptr inbounds i32, i32* %c, i32 %inc.48 + %99 = load i32, i32* %arrayidx1.49, align 4 + %mul.49 = mul nsw i32 %99, %98 + %arrayidx2.49 = getelementptr inbounds i32, i32* %a, i32 %inc.48 + store i32 %mul.49, i32* %arrayidx2.49, align 4 + %inc.49 = or i32 %i.08, 50 + %arrayidx.50 = getelementptr inbounds i32, i32* %b, i32 %inc.49 + %100 = load i32, i32* %arrayidx.50, align 4 + %arrayidx1.50 = getelementptr inbounds i32, i32* %c, i32 %inc.49 + %101 = load i32, i32* %arrayidx1.50, align 4 + %mul.50 = mul nsw i32 %101, %100 + %arrayidx2.50 = getelementptr inbounds i32, i32* %a, i32 %inc.49 + store i32 %mul.50, i32* %arrayidx2.50, align 4 + %inc.50 = or i32 %i.08, 51 + %arrayidx.51 = getelementptr inbounds i32, i32* %b, i32 
%inc.50 + %102 = load i32, i32* %arrayidx.51, align 4 + %arrayidx1.51 = getelementptr inbounds i32, i32* %c, i32 %inc.50 + %103 = load i32, i32* %arrayidx1.51, align 4 + %mul.51 = mul nsw i32 %103, %102 + %arrayidx2.51 = getelementptr inbounds i32, i32* %a, i32 %inc.50 + store i32 %mul.51, i32* %arrayidx2.51, align 4 + %inc.51 = or i32 %i.08, 52 + %arrayidx.52 = getelementptr inbounds i32, i32* %b, i32 %inc.51 + %104 = load i32, i32* %arrayidx.52, align 4 + %arrayidx1.52 = getelementptr inbounds i32, i32* %c, i32 %inc.51 + %105 = load i32, i32* %arrayidx1.52, align 4 + %mul.52 = mul nsw i32 %105, %104 + %arrayidx2.52 = getelementptr inbounds i32, i32* %a, i32 %inc.51 + store i32 %mul.52, i32* %arrayidx2.52, align 4 + %inc.52 = or i32 %i.08, 53 + %arrayidx.53 = getelementptr inbounds i32, i32* %b, i32 %inc.52 + %106 = load i32, i32* %arrayidx.53, align 4 + %arrayidx1.53 = getelementptr inbounds i32, i32* %c, i32 %inc.52 + %107 = load i32, i32* %arrayidx1.53, align 4 + %mul.53 = mul nsw i32 %107, %106 + %arrayidx2.53 = getelementptr inbounds i32, i32* %a, i32 %inc.52 + store i32 %mul.53, i32* %arrayidx2.53, align 4 + %inc.53 = or i32 %i.08, 54 + %arrayidx.54 = getelementptr inbounds i32, i32* %b, i32 %inc.53 + %108 = load i32, i32* %arrayidx.54, align 4 + %arrayidx1.54 = getelementptr inbounds i32, i32* %c, i32 %inc.53 + %109 = load i32, i32* %arrayidx1.54, align 4 + %mul.54 = mul nsw i32 %109, %108 + %arrayidx2.54 = getelementptr inbounds i32, i32* %a, i32 %inc.53 + store i32 %mul.54, i32* %arrayidx2.54, align 4 + %inc.54 = or i32 %i.08, 55 + %arrayidx.55 = getelementptr inbounds i32, i32* %b, i32 %inc.54 + %110 = load i32, i32* %arrayidx.55, align 4 + %arrayidx1.55 = getelementptr inbounds i32, i32* %c, i32 %inc.54 + %111 = load i32, i32* %arrayidx1.55, align 4 + %mul.55 = mul nsw i32 %111, %110 + %arrayidx2.55 = getelementptr inbounds i32, i32* %a, i32 %inc.54 + store i32 %mul.55, i32* %arrayidx2.55, align 4 + %inc.55 = or i32 %i.08, 56 + %arrayidx.56 = getelementptr inbounds i32, i32* %b, i32 %inc.55 + %112 = load i32, i32* %arrayidx.56, align 4 + %arrayidx1.56 = getelementptr inbounds i32, i32* %c, i32 %inc.55 + %113 = load i32, i32* %arrayidx1.56, align 4 + %mul.56 = mul nsw i32 %113, %112 + %arrayidx2.56 = getelementptr inbounds i32, i32* %a, i32 %inc.55 + store i32 %mul.56, i32* %arrayidx2.56, align 4 + %inc.56 = or i32 %i.08, 57 + %arrayidx.57 = getelementptr inbounds i32, i32* %b, i32 %inc.56 + %114 = load i32, i32* %arrayidx.57, align 4 + %arrayidx1.57 = getelementptr inbounds i32, i32* %c, i32 %inc.56 + %115 = load i32, i32* %arrayidx1.57, align 4 + %mul.57 = mul nsw i32 %115, %114 + %arrayidx2.57 = getelementptr inbounds i32, i32* %a, i32 %inc.56 + store i32 %mul.57, i32* %arrayidx2.57, align 4 + %inc.57 = or i32 %i.08, 58 + %arrayidx.58 = getelementptr inbounds i32, i32* %b, i32 %inc.57 + %116 = load i32, i32* %arrayidx.58, align 4 + %arrayidx1.58 = getelementptr inbounds i32, i32* %c, i32 %inc.57 + %117 = load i32, i32* %arrayidx1.58, align 4 + %mul.58 = mul nsw i32 %117, %116 + %arrayidx2.58 = getelementptr inbounds i32, i32* %a, i32 %inc.57 + store i32 %mul.58, i32* %arrayidx2.58, align 4 + %inc.58 = or i32 %i.08, 59 + %arrayidx.59 = getelementptr inbounds i32, i32* %b, i32 %inc.58 + %118 = load i32, i32* %arrayidx.59, align 4 + %arrayidx1.59 = getelementptr inbounds i32, i32* %c, i32 %inc.58 + %119 = load i32, i32* %arrayidx1.59, align 4 + %mul.59 = mul nsw i32 %119, %118 + %arrayidx2.59 = getelementptr inbounds i32, i32* %a, i32 %inc.58 + store i32 %mul.59, i32* 
%arrayidx2.59, align 4 + %inc.59 = or i32 %i.08, 60 + %arrayidx.60 = getelementptr inbounds i32, i32* %b, i32 %inc.59 + %120 = load i32, i32* %arrayidx.60, align 4 + %arrayidx1.60 = getelementptr inbounds i32, i32* %c, i32 %inc.59 + %121 = load i32, i32* %arrayidx1.60, align 4 + %mul.60 = mul nsw i32 %121, %120 + %arrayidx2.60 = getelementptr inbounds i32, i32* %a, i32 %inc.59 + store i32 %mul.60, i32* %arrayidx2.60, align 4 + %inc.60 = or i32 %i.08, 61 + %arrayidx.61 = getelementptr inbounds i32, i32* %b, i32 %inc.60 + %122 = load i32, i32* %arrayidx.61, align 4 + %arrayidx1.61 = getelementptr inbounds i32, i32* %c, i32 %inc.60 + %123 = load i32, i32* %arrayidx1.61, align 4 + %mul.61 = mul nsw i32 %123, %122 + %arrayidx2.61 = getelementptr inbounds i32, i32* %a, i32 %inc.60 + store i32 %mul.61, i32* %arrayidx2.61, align 4 + %inc.61 = or i32 %i.08, 62 + %arrayidx.62 = getelementptr inbounds i32, i32* %b, i32 %inc.61 + %124 = load i32, i32* %arrayidx.62, align 4 + %arrayidx1.62 = getelementptr inbounds i32, i32* %c, i32 %inc.61 + %125 = load i32, i32* %arrayidx1.62, align 4 + %mul.62 = mul nsw i32 %125, %124 + %arrayidx2.62 = getelementptr inbounds i32, i32* %a, i32 %inc.61 + store i32 %mul.62, i32* %arrayidx2.62, align 4 + %inc.62 = or i32 %i.08, 63 + %arrayidx.63 = getelementptr inbounds i32, i32* %b, i32 %inc.62 + %126 = load i32, i32* %arrayidx.63, align 4 + %arrayidx1.63 = getelementptr inbounds i32, i32* %c, i32 %inc.62 + %127 = load i32, i32* %arrayidx1.63, align 4 + %mul.63 = mul nsw i32 %127, %126 + %arrayidx2.63 = getelementptr inbounds i32, i32* %a, i32 %inc.62 + store i32 %mul.63, i32* %arrayidx2.63, align 4 + %inc.63 = or i32 %i.08, 64 + %arrayidx.64 = getelementptr inbounds i32, i32* %b, i32 %inc.63 + %128 = load i32, i32* %arrayidx.64, align 4 + %arrayidx1.64 = getelementptr inbounds i32, i32* %c, i32 %inc.63 + %129 = load i32, i32* %arrayidx1.64, align 4 + %mul.64 = mul nsw i32 %129, %128 + %arrayidx2.64 = getelementptr inbounds i32, i32* %a, i32 %inc.63 + store i32 %mul.64, i32* %arrayidx2.64, align 4 + %inc.64 = or i32 %i.08, 65 + %arrayidx.65 = getelementptr inbounds i32, i32* %b, i32 %inc.64 + %130 = load i32, i32* %arrayidx.65, align 4 + %arrayidx1.65 = getelementptr inbounds i32, i32* %c, i32 %inc.64 + %131 = load i32, i32* %arrayidx1.65, align 4 + %mul.65 = mul nsw i32 %131, %130 + %arrayidx2.65 = getelementptr inbounds i32, i32* %a, i32 %inc.64 + store i32 %mul.65, i32* %arrayidx2.65, align 4 + %inc.65 = or i32 %i.08, 66 + %arrayidx.66 = getelementptr inbounds i32, i32* %b, i32 %inc.65 + %132 = load i32, i32* %arrayidx.66, align 4 + %arrayidx1.66 = getelementptr inbounds i32, i32* %c, i32 %inc.65 + %133 = load i32, i32* %arrayidx1.66, align 4 + %mul.66 = mul nsw i32 %133, %132 + %arrayidx2.66 = getelementptr inbounds i32, i32* %a, i32 %inc.65 + store i32 %mul.66, i32* %arrayidx2.66, align 4 + %inc.66 = or i32 %i.08, 67 + %arrayidx.67 = getelementptr inbounds i32, i32* %b, i32 %inc.66 + %134 = load i32, i32* %arrayidx.67, align 4 + %arrayidx1.67 = getelementptr inbounds i32, i32* %c, i32 %inc.66 + %135 = load i32, i32* %arrayidx1.67, align 4 + %mul.67 = mul nsw i32 %135, %134 + %arrayidx2.67 = getelementptr inbounds i32, i32* %a, i32 %inc.66 + store i32 %mul.67, i32* %arrayidx2.67, align 4 + %inc.67 = or i32 %i.08, 68 + %arrayidx.68 = getelementptr inbounds i32, i32* %b, i32 %inc.67 + %136 = load i32, i32* %arrayidx.68, align 4 + %arrayidx1.68 = getelementptr inbounds i32, i32* %c, i32 %inc.67 + %137 = load i32, i32* %arrayidx1.68, align 4 + %mul.68 = mul nsw i32 
%137, %136 + %arrayidx2.68 = getelementptr inbounds i32, i32* %a, i32 %inc.67 + store i32 %mul.68, i32* %arrayidx2.68, align 4 + %inc.68 = or i32 %i.08, 69 + %arrayidx.69 = getelementptr inbounds i32, i32* %b, i32 %inc.68 + %138 = load i32, i32* %arrayidx.69, align 4 + %arrayidx1.69 = getelementptr inbounds i32, i32* %c, i32 %inc.68 + %139 = load i32, i32* %arrayidx1.69, align 4 + %mul.69 = mul nsw i32 %139, %138 + %arrayidx2.69 = getelementptr inbounds i32, i32* %a, i32 %inc.68 + store i32 %mul.69, i32* %arrayidx2.69, align 4 + %inc.69 = or i32 %i.08, 70 + %arrayidx.70 = getelementptr inbounds i32, i32* %b, i32 %inc.69 + %140 = load i32, i32* %arrayidx.70, align 4 + %arrayidx1.70 = getelementptr inbounds i32, i32* %c, i32 %inc.69 + %141 = load i32, i32* %arrayidx1.70, align 4 + %mul.70 = mul nsw i32 %141, %140 + %arrayidx2.70 = getelementptr inbounds i32, i32* %a, i32 %inc.69 + store i32 %mul.70, i32* %arrayidx2.70, align 4 + %inc.70 = or i32 %i.08, 71 + %arrayidx.71 = getelementptr inbounds i32, i32* %b, i32 %inc.70 + %142 = load i32, i32* %arrayidx.71, align 4 + %arrayidx1.71 = getelementptr inbounds i32, i32* %c, i32 %inc.70 + %143 = load i32, i32* %arrayidx1.71, align 4 + %mul.71 = mul nsw i32 %143, %142 + %arrayidx2.71 = getelementptr inbounds i32, i32* %a, i32 %inc.70 + store i32 %mul.71, i32* %arrayidx2.71, align 4 + %inc.71 = or i32 %i.08, 72 + %arrayidx.72 = getelementptr inbounds i32, i32* %b, i32 %inc.71 + %144 = load i32, i32* %arrayidx.72, align 4 + %arrayidx1.72 = getelementptr inbounds i32, i32* %c, i32 %inc.71 + %145 = load i32, i32* %arrayidx1.72, align 4 + %mul.72 = mul nsw i32 %145, %144 + %arrayidx2.72 = getelementptr inbounds i32, i32* %a, i32 %inc.71 + store i32 %mul.72, i32* %arrayidx2.72, align 4 + %inc.72 = or i32 %i.08, 73 + %arrayidx.73 = getelementptr inbounds i32, i32* %b, i32 %inc.72 + %146 = load i32, i32* %arrayidx.73, align 4 + %arrayidx1.73 = getelementptr inbounds i32, i32* %c, i32 %inc.72 + %147 = load i32, i32* %arrayidx1.73, align 4 + %mul.73 = mul nsw i32 %147, %146 + %arrayidx2.73 = getelementptr inbounds i32, i32* %a, i32 %inc.72 + store i32 %mul.73, i32* %arrayidx2.73, align 4 + %inc.73 = or i32 %i.08, 74 + %arrayidx.74 = getelementptr inbounds i32, i32* %b, i32 %inc.73 + %148 = load i32, i32* %arrayidx.74, align 4 + %arrayidx1.74 = getelementptr inbounds i32, i32* %c, i32 %inc.73 + %149 = load i32, i32* %arrayidx1.74, align 4 + %mul.74 = mul nsw i32 %149, %148 + %arrayidx2.74 = getelementptr inbounds i32, i32* %a, i32 %inc.73 + store i32 %mul.74, i32* %arrayidx2.74, align 4 + %inc.74 = or i32 %i.08, 75 + %arrayidx.75 = getelementptr inbounds i32, i32* %b, i32 %inc.74 + %150 = load i32, i32* %arrayidx.75, align 4 + %arrayidx1.75 = getelementptr inbounds i32, i32* %c, i32 %inc.74 + %151 = load i32, i32* %arrayidx1.75, align 4 + %mul.75 = mul nsw i32 %151, %150 + %arrayidx2.75 = getelementptr inbounds i32, i32* %a, i32 %inc.74 + store i32 %mul.75, i32* %arrayidx2.75, align 4 + %inc.75 = or i32 %i.08, 76 + %arrayidx.76 = getelementptr inbounds i32, i32* %b, i32 %inc.75 + %152 = load i32, i32* %arrayidx.76, align 4 + %arrayidx1.76 = getelementptr inbounds i32, i32* %c, i32 %inc.75 + %153 = load i32, i32* %arrayidx1.76, align 4 + %mul.76 = mul nsw i32 %153, %152 + %arrayidx2.76 = getelementptr inbounds i32, i32* %a, i32 %inc.75 + store i32 %mul.76, i32* %arrayidx2.76, align 4 + %inc.76 = or i32 %i.08, 77 + %arrayidx.77 = getelementptr inbounds i32, i32* %b, i32 %inc.76 + %154 = load i32, i32* %arrayidx.77, align 4 + %arrayidx1.77 = getelementptr 
inbounds i32, i32* %c, i32 %inc.76 + %155 = load i32, i32* %arrayidx1.77, align 4 + %mul.77 = mul nsw i32 %155, %154 + %arrayidx2.77 = getelementptr inbounds i32, i32* %a, i32 %inc.76 + store i32 %mul.77, i32* %arrayidx2.77, align 4 + %inc.77 = or i32 %i.08, 78 + %arrayidx.78 = getelementptr inbounds i32, i32* %b, i32 %inc.77 + %156 = load i32, i32* %arrayidx.78, align 4 + %arrayidx1.78 = getelementptr inbounds i32, i32* %c, i32 %inc.77 + %157 = load i32, i32* %arrayidx1.78, align 4 + %mul.78 = mul nsw i32 %157, %156 + %arrayidx2.78 = getelementptr inbounds i32, i32* %a, i32 %inc.77 + store i32 %mul.78, i32* %arrayidx2.78, align 4 + %inc.78 = or i32 %i.08, 79 + %arrayidx.79 = getelementptr inbounds i32, i32* %b, i32 %inc.78 + %158 = load i32, i32* %arrayidx.79, align 4 + %arrayidx1.79 = getelementptr inbounds i32, i32* %c, i32 %inc.78 + %159 = load i32, i32* %arrayidx1.79, align 4 + %mul.79 = mul nsw i32 %159, %158 + %arrayidx2.79 = getelementptr inbounds i32, i32* %a, i32 %inc.78 + store i32 %mul.79, i32* %arrayidx2.79, align 4 + %inc.79 = or i32 %i.08, 80 + %arrayidx.80 = getelementptr inbounds i32, i32* %b, i32 %inc.79 + %160 = load i32, i32* %arrayidx.80, align 4 + %arrayidx1.80 = getelementptr inbounds i32, i32* %c, i32 %inc.79 + %161 = load i32, i32* %arrayidx1.80, align 4 + %mul.80 = mul nsw i32 %161, %160 + %arrayidx2.80 = getelementptr inbounds i32, i32* %a, i32 %inc.79 + store i32 %mul.80, i32* %arrayidx2.80, align 4 + %inc.80 = or i32 %i.08, 81 + %arrayidx.81 = getelementptr inbounds i32, i32* %b, i32 %inc.80 + %162 = load i32, i32* %arrayidx.81, align 4 + %arrayidx1.81 = getelementptr inbounds i32, i32* %c, i32 %inc.80 + %163 = load i32, i32* %arrayidx1.81, align 4 + %mul.81 = mul nsw i32 %163, %162 + %arrayidx2.81 = getelementptr inbounds i32, i32* %a, i32 %inc.80 + store i32 %mul.81, i32* %arrayidx2.81, align 4 + %inc.81 = or i32 %i.08, 82 + %arrayidx.82 = getelementptr inbounds i32, i32* %b, i32 %inc.81 + %164 = load i32, i32* %arrayidx.82, align 4 + %arrayidx1.82 = getelementptr inbounds i32, i32* %c, i32 %inc.81 + %165 = load i32, i32* %arrayidx1.82, align 4 + %mul.82 = mul nsw i32 %165, %164 + %arrayidx2.82 = getelementptr inbounds i32, i32* %a, i32 %inc.81 + store i32 %mul.82, i32* %arrayidx2.82, align 4 + %inc.82 = or i32 %i.08, 83 + %arrayidx.83 = getelementptr inbounds i32, i32* %b, i32 %inc.82 + %166 = load i32, i32* %arrayidx.83, align 4 + %arrayidx1.83 = getelementptr inbounds i32, i32* %c, i32 %inc.82 + %167 = load i32, i32* %arrayidx1.83, align 4 + %mul.83 = mul nsw i32 %167, %166 + %arrayidx2.83 = getelementptr inbounds i32, i32* %a, i32 %inc.82 + store i32 %mul.83, i32* %arrayidx2.83, align 4 + %inc.83 = or i32 %i.08, 84 + %arrayidx.84 = getelementptr inbounds i32, i32* %b, i32 %inc.83 + %168 = load i32, i32* %arrayidx.84, align 4 + %arrayidx1.84 = getelementptr inbounds i32, i32* %c, i32 %inc.83 + %169 = load i32, i32* %arrayidx1.84, align 4 + %mul.84 = mul nsw i32 %169, %168 + %arrayidx2.84 = getelementptr inbounds i32, i32* %a, i32 %inc.83 + store i32 %mul.84, i32* %arrayidx2.84, align 4 + %inc.84 = or i32 %i.08, 85 + %arrayidx.85 = getelementptr inbounds i32, i32* %b, i32 %inc.84 + %170 = load i32, i32* %arrayidx.85, align 4 + %arrayidx1.85 = getelementptr inbounds i32, i32* %c, i32 %inc.84 + %171 = load i32, i32* %arrayidx1.85, align 4 + %mul.85 = mul nsw i32 %171, %170 + %arrayidx2.85 = getelementptr inbounds i32, i32* %a, i32 %inc.84 + store i32 %mul.85, i32* %arrayidx2.85, align 4 + %inc.85 = or i32 %i.08, 86 + %arrayidx.86 = getelementptr inbounds i32, 
i32* %b, i32 %inc.85 + %172 = load i32, i32* %arrayidx.86, align 4 + %arrayidx1.86 = getelementptr inbounds i32, i32* %c, i32 %inc.85 + %173 = load i32, i32* %arrayidx1.86, align 4 + %mul.86 = mul nsw i32 %173, %172 + %arrayidx2.86 = getelementptr inbounds i32, i32* %a, i32 %inc.85 + store i32 %mul.86, i32* %arrayidx2.86, align 4 + %inc.86 = or i32 %i.08, 87 + %arrayidx.87 = getelementptr inbounds i32, i32* %b, i32 %inc.86 + %174 = load i32, i32* %arrayidx.87, align 4 + %arrayidx1.87 = getelementptr inbounds i32, i32* %c, i32 %inc.86 + %175 = load i32, i32* %arrayidx1.87, align 4 + %mul.87 = mul nsw i32 %175, %174 + %arrayidx2.87 = getelementptr inbounds i32, i32* %a, i32 %inc.86 + store i32 %mul.87, i32* %arrayidx2.87, align 4 + %inc.87 = or i32 %i.08, 88 + %arrayidx.88 = getelementptr inbounds i32, i32* %b, i32 %inc.87 + %176 = load i32, i32* %arrayidx.88, align 4 + %arrayidx1.88 = getelementptr inbounds i32, i32* %c, i32 %inc.87 + %177 = load i32, i32* %arrayidx1.88, align 4 + %mul.88 = mul nsw i32 %177, %176 + %arrayidx2.88 = getelementptr inbounds i32, i32* %a, i32 %inc.87 + store i32 %mul.88, i32* %arrayidx2.88, align 4 + %inc.88 = or i32 %i.08, 89 + %arrayidx.89 = getelementptr inbounds i32, i32* %b, i32 %inc.88 + %178 = load i32, i32* %arrayidx.89, align 4 + %arrayidx1.89 = getelementptr inbounds i32, i32* %c, i32 %inc.88 + %179 = load i32, i32* %arrayidx1.89, align 4 + %mul.89 = mul nsw i32 %179, %178 + %arrayidx2.89 = getelementptr inbounds i32, i32* %a, i32 %inc.88 + store i32 %mul.89, i32* %arrayidx2.89, align 4 + %inc.89 = or i32 %i.08, 90 + %arrayidx.90 = getelementptr inbounds i32, i32* %b, i32 %inc.89 + %180 = load i32, i32* %arrayidx.90, align 4 + %arrayidx1.90 = getelementptr inbounds i32, i32* %c, i32 %inc.89 + %181 = load i32, i32* %arrayidx1.90, align 4 + %mul.90 = mul nsw i32 %181, %180 + %arrayidx2.90 = getelementptr inbounds i32, i32* %a, i32 %inc.89 + store i32 %mul.90, i32* %arrayidx2.90, align 4 + %inc.90 = or i32 %i.08, 91 + %arrayidx.91 = getelementptr inbounds i32, i32* %b, i32 %inc.90 + %182 = load i32, i32* %arrayidx.91, align 4 + %arrayidx1.91 = getelementptr inbounds i32, i32* %c, i32 %inc.90 + %183 = load i32, i32* %arrayidx1.91, align 4 + %mul.91 = mul nsw i32 %183, %182 + %arrayidx2.91 = getelementptr inbounds i32, i32* %a, i32 %inc.90 + store i32 %mul.91, i32* %arrayidx2.91, align 4 + %inc.91 = or i32 %i.08, 92 + %arrayidx.92 = getelementptr inbounds i32, i32* %b, i32 %inc.91 + %184 = load i32, i32* %arrayidx.92, align 4 + %arrayidx1.92 = getelementptr inbounds i32, i32* %c, i32 %inc.91 + %185 = load i32, i32* %arrayidx1.92, align 4 + %mul.92 = mul nsw i32 %185, %184 + %arrayidx2.92 = getelementptr inbounds i32, i32* %a, i32 %inc.91 + store i32 %mul.92, i32* %arrayidx2.92, align 4 + %inc.92 = or i32 %i.08, 93 + %arrayidx.93 = getelementptr inbounds i32, i32* %b, i32 %inc.92 + %186 = load i32, i32* %arrayidx.93, align 4 + %arrayidx1.93 = getelementptr inbounds i32, i32* %c, i32 %inc.92 + %187 = load i32, i32* %arrayidx1.93, align 4 + %mul.93 = mul nsw i32 %187, %186 + %arrayidx2.93 = getelementptr inbounds i32, i32* %a, i32 %inc.92 + store i32 %mul.93, i32* %arrayidx2.93, align 4 + %inc.93 = or i32 %i.08, 94 + %arrayidx.94 = getelementptr inbounds i32, i32* %b, i32 %inc.93 + %188 = load i32, i32* %arrayidx.94, align 4 + %arrayidx1.94 = getelementptr inbounds i32, i32* %c, i32 %inc.93 + %189 = load i32, i32* %arrayidx1.94, align 4 + %mul.94 = mul nsw i32 %189, %188 + %arrayidx2.94 = getelementptr inbounds i32, i32* %a, i32 %inc.93 + store i32 %mul.94, 
i32* %arrayidx2.94, align 4 + %inc.94 = or i32 %i.08, 95 + %arrayidx.95 = getelementptr inbounds i32, i32* %b, i32 %inc.94 + %190 = load i32, i32* %arrayidx.95, align 4 + %arrayidx1.95 = getelementptr inbounds i32, i32* %c, i32 %inc.94 + %191 = load i32, i32* %arrayidx1.95, align 4 + %mul.95 = mul nsw i32 %191, %190 + %arrayidx2.95 = getelementptr inbounds i32, i32* %a, i32 %inc.94 + store i32 %mul.95, i32* %arrayidx2.95, align 4 + %inc.95 = or i32 %i.08, 96 + %arrayidx.96 = getelementptr inbounds i32, i32* %b, i32 %inc.95 + %192 = load i32, i32* %arrayidx.96, align 4 + %arrayidx1.96 = getelementptr inbounds i32, i32* %c, i32 %inc.95 + %193 = load i32, i32* %arrayidx1.96, align 4 + %mul.96 = mul nsw i32 %193, %192 + %arrayidx2.96 = getelementptr inbounds i32, i32* %a, i32 %inc.95 + store i32 %mul.96, i32* %arrayidx2.96, align 4 + %inc.96 = or i32 %i.08, 97 + %arrayidx.97 = getelementptr inbounds i32, i32* %b, i32 %inc.96 + %194 = load i32, i32* %arrayidx.97, align 4 + %arrayidx1.97 = getelementptr inbounds i32, i32* %c, i32 %inc.96 + %195 = load i32, i32* %arrayidx1.97, align 4 + %mul.97 = mul nsw i32 %195, %194 + %arrayidx2.97 = getelementptr inbounds i32, i32* %a, i32 %inc.96 + store i32 %mul.97, i32* %arrayidx2.97, align 4 + %inc.97 = or i32 %i.08, 98 + %arrayidx.98 = getelementptr inbounds i32, i32* %b, i32 %inc.97 + %196 = load i32, i32* %arrayidx.98, align 4 + %arrayidx1.98 = getelementptr inbounds i32, i32* %c, i32 %inc.97 + %197 = load i32, i32* %arrayidx1.98, align 4 + %mul.98 = mul nsw i32 %197, %196 + %arrayidx2.98 = getelementptr inbounds i32, i32* %a, i32 %inc.97 + store i32 %mul.98, i32* %arrayidx2.98, align 4 + %inc.98 = or i32 %i.08, 99 + %arrayidx.99 = getelementptr inbounds i32, i32* %b, i32 %inc.98 + %198 = load i32, i32* %arrayidx.99, align 4 + %arrayidx1.99 = getelementptr inbounds i32, i32* %c, i32 %inc.98 + %199 = load i32, i32* %arrayidx1.99, align 4 + %mul.99 = mul nsw i32 %199, %198 + %arrayidx2.99 = getelementptr inbounds i32, i32* %a, i32 %inc.98 + store i32 %mul.99, i32* %arrayidx2.99, align 4 + %inc.99 = or i32 %i.08, 100 + %arrayidx.100 = getelementptr inbounds i32, i32* %b, i32 %inc.99 + %200 = load i32, i32* %arrayidx.100, align 4 + %arrayidx1.100 = getelementptr inbounds i32, i32* %c, i32 %inc.99 + %201 = load i32, i32* %arrayidx1.100, align 4 + %mul.100 = mul nsw i32 %201, %200 + %arrayidx2.100 = getelementptr inbounds i32, i32* %a, i32 %inc.99 + store i32 %mul.100, i32* %arrayidx2.100, align 4 + %inc.100 = or i32 %i.08, 101 + %arrayidx.101 = getelementptr inbounds i32, i32* %b, i32 %inc.100 + %202 = load i32, i32* %arrayidx.101, align 4 + %arrayidx1.101 = getelementptr inbounds i32, i32* %c, i32 %inc.100 + %203 = load i32, i32* %arrayidx1.101, align 4 + %mul.101 = mul nsw i32 %203, %202 + %arrayidx2.101 = getelementptr inbounds i32, i32* %a, i32 %inc.100 + store i32 %mul.101, i32* %arrayidx2.101, align 4 + %inc.101 = or i32 %i.08, 102 + %arrayidx.102 = getelementptr inbounds i32, i32* %b, i32 %inc.101 + %204 = load i32, i32* %arrayidx.102, align 4 + %arrayidx1.102 = getelementptr inbounds i32, i32* %c, i32 %inc.101 + %205 = load i32, i32* %arrayidx1.102, align 4 + %mul.102 = mul nsw i32 %205, %204 + %arrayidx2.102 = getelementptr inbounds i32, i32* %a, i32 %inc.101 + store i32 %mul.102, i32* %arrayidx2.102, align 4 + %inc.102 = or i32 %i.08, 103 + %arrayidx.103 = getelementptr inbounds i32, i32* %b, i32 %inc.102 + %206 = load i32, i32* %arrayidx.103, align 4 + %arrayidx1.103 = getelementptr inbounds i32, i32* %c, i32 %inc.102 + %207 = load i32, i32* 
%arrayidx1.103, align 4 + %mul.103 = mul nsw i32 %207, %206 + %arrayidx2.103 = getelementptr inbounds i32, i32* %a, i32 %inc.102 + store i32 %mul.103, i32* %arrayidx2.103, align 4 + %inc.103 = or i32 %i.08, 104 + %arrayidx.104 = getelementptr inbounds i32, i32* %b, i32 %inc.103 + %208 = load i32, i32* %arrayidx.104, align 4 + %arrayidx1.104 = getelementptr inbounds i32, i32* %c, i32 %inc.103 + %209 = load i32, i32* %arrayidx1.104, align 4 + %mul.104 = mul nsw i32 %209, %208 + %arrayidx2.104 = getelementptr inbounds i32, i32* %a, i32 %inc.103 + store i32 %mul.104, i32* %arrayidx2.104, align 4 + %inc.104 = or i32 %i.08, 105 + %arrayidx.105 = getelementptr inbounds i32, i32* %b, i32 %inc.104 + %210 = load i32, i32* %arrayidx.105, align 4 + %arrayidx1.105 = getelementptr inbounds i32, i32* %c, i32 %inc.104 + %211 = load i32, i32* %arrayidx1.105, align 4 + %mul.105 = mul nsw i32 %211, %210 + %arrayidx2.105 = getelementptr inbounds i32, i32* %a, i32 %inc.104 + store i32 %mul.105, i32* %arrayidx2.105, align 4 + %inc.105 = or i32 %i.08, 106 + %arrayidx.106 = getelementptr inbounds i32, i32* %b, i32 %inc.105 + %212 = load i32, i32* %arrayidx.106, align 4 + %arrayidx1.106 = getelementptr inbounds i32, i32* %c, i32 %inc.105 + %213 = load i32, i32* %arrayidx1.106, align 4 + %mul.106 = mul nsw i32 %213, %212 + %arrayidx2.106 = getelementptr inbounds i32, i32* %a, i32 %inc.105 + store i32 %mul.106, i32* %arrayidx2.106, align 4 + %inc.106 = or i32 %i.08, 107 + %arrayidx.107 = getelementptr inbounds i32, i32* %b, i32 %inc.106 + %214 = load i32, i32* %arrayidx.107, align 4 + %arrayidx1.107 = getelementptr inbounds i32, i32* %c, i32 %inc.106 + %215 = load i32, i32* %arrayidx1.107, align 4 + %mul.107 = mul nsw i32 %215, %214 + %arrayidx2.107 = getelementptr inbounds i32, i32* %a, i32 %inc.106 + store i32 %mul.107, i32* %arrayidx2.107, align 4 + %inc.107 = or i32 %i.08, 108 + %arrayidx.108 = getelementptr inbounds i32, i32* %b, i32 %inc.107 + %216 = load i32, i32* %arrayidx.108, align 4 + %arrayidx1.108 = getelementptr inbounds i32, i32* %c, i32 %inc.107 + %217 = load i32, i32* %arrayidx1.108, align 4 + %mul.108 = mul nsw i32 %217, %216 + %arrayidx2.108 = getelementptr inbounds i32, i32* %a, i32 %inc.107 + store i32 %mul.108, i32* %arrayidx2.108, align 4 + %inc.108 = or i32 %i.08, 109 + %arrayidx.109 = getelementptr inbounds i32, i32* %b, i32 %inc.108 + %218 = load i32, i32* %arrayidx.109, align 4 + %arrayidx1.109 = getelementptr inbounds i32, i32* %c, i32 %inc.108 + %219 = load i32, i32* %arrayidx1.109, align 4 + %mul.109 = mul nsw i32 %219, %218 + %arrayidx2.109 = getelementptr inbounds i32, i32* %a, i32 %inc.108 + store i32 %mul.109, i32* %arrayidx2.109, align 4 + %inc.109 = or i32 %i.08, 110 + %arrayidx.110 = getelementptr inbounds i32, i32* %b, i32 %inc.109 + %220 = load i32, i32* %arrayidx.110, align 4 + %arrayidx1.110 = getelementptr inbounds i32, i32* %c, i32 %inc.109 + %221 = load i32, i32* %arrayidx1.110, align 4 + %mul.110 = mul nsw i32 %221, %220 + %arrayidx2.110 = getelementptr inbounds i32, i32* %a, i32 %inc.109 + store i32 %mul.110, i32* %arrayidx2.110, align 4 + %inc.110 = or i32 %i.08, 111 + %arrayidx.111 = getelementptr inbounds i32, i32* %b, i32 %inc.110 + %222 = load i32, i32* %arrayidx.111, align 4 + %arrayidx1.111 = getelementptr inbounds i32, i32* %c, i32 %inc.110 + %223 = load i32, i32* %arrayidx1.111, align 4 + %mul.111 = mul nsw i32 %223, %222 + %arrayidx2.111 = getelementptr inbounds i32, i32* %a, i32 %inc.110 + store i32 %mul.111, i32* %arrayidx2.111, align 4 + %inc.111 = or i32 
%i.08, 112 + %arrayidx.112 = getelementptr inbounds i32, i32* %b, i32 %inc.111 + %224 = load i32, i32* %arrayidx.112, align 4 + %arrayidx1.112 = getelementptr inbounds i32, i32* %c, i32 %inc.111 + %225 = load i32, i32* %arrayidx1.112, align 4 + %mul.112 = mul nsw i32 %225, %224 + %arrayidx2.112 = getelementptr inbounds i32, i32* %a, i32 %inc.111 + store i32 %mul.112, i32* %arrayidx2.112, align 4 + %inc.112 = or i32 %i.08, 113 + %arrayidx.113 = getelementptr inbounds i32, i32* %b, i32 %inc.112 + %226 = load i32, i32* %arrayidx.113, align 4 + %arrayidx1.113 = getelementptr inbounds i32, i32* %c, i32 %inc.112 + %227 = load i32, i32* %arrayidx1.113, align 4 + %mul.113 = mul nsw i32 %227, %226 + %arrayidx2.113 = getelementptr inbounds i32, i32* %a, i32 %inc.112 + store i32 %mul.113, i32* %arrayidx2.113, align 4 + %inc.113 = or i32 %i.08, 114 + %arrayidx.114 = getelementptr inbounds i32, i32* %b, i32 %inc.113 + %228 = load i32, i32* %arrayidx.114, align 4 + %arrayidx1.114 = getelementptr inbounds i32, i32* %c, i32 %inc.113 + %229 = load i32, i32* %arrayidx1.114, align 4 + %mul.114 = mul nsw i32 %229, %228 + %arrayidx2.114 = getelementptr inbounds i32, i32* %a, i32 %inc.113 + store i32 %mul.114, i32* %arrayidx2.114, align 4 + %inc.114 = or i32 %i.08, 115 + %arrayidx.115 = getelementptr inbounds i32, i32* %b, i32 %inc.114 + %230 = load i32, i32* %arrayidx.115, align 4 + %arrayidx1.115 = getelementptr inbounds i32, i32* %c, i32 %inc.114 + %231 = load i32, i32* %arrayidx1.115, align 4 + %mul.115 = mul nsw i32 %231, %230 + %arrayidx2.115 = getelementptr inbounds i32, i32* %a, i32 %inc.114 + store i32 %mul.115, i32* %arrayidx2.115, align 4 + %inc.115 = or i32 %i.08, 116 + %arrayidx.116 = getelementptr inbounds i32, i32* %b, i32 %inc.115 + %232 = load i32, i32* %arrayidx.116, align 4 + %arrayidx1.116 = getelementptr inbounds i32, i32* %c, i32 %inc.115 + %233 = load i32, i32* %arrayidx1.116, align 4 + %mul.116 = mul nsw i32 %233, %232 + %arrayidx2.116 = getelementptr inbounds i32, i32* %a, i32 %inc.115 + store i32 %mul.116, i32* %arrayidx2.116, align 4 + %inc.116 = or i32 %i.08, 117 + %arrayidx.117 = getelementptr inbounds i32, i32* %b, i32 %inc.116 + %234 = load i32, i32* %arrayidx.117, align 4 + %arrayidx1.117 = getelementptr inbounds i32, i32* %c, i32 %inc.116 + %235 = load i32, i32* %arrayidx1.117, align 4 + %mul.117 = mul nsw i32 %235, %234 + %arrayidx2.117 = getelementptr inbounds i32, i32* %a, i32 %inc.116 + store i32 %mul.117, i32* %arrayidx2.117, align 4 + %inc.117 = or i32 %i.08, 118 + %arrayidx.118 = getelementptr inbounds i32, i32* %b, i32 %inc.117 + %236 = load i32, i32* %arrayidx.118, align 4 + %arrayidx1.118 = getelementptr inbounds i32, i32* %c, i32 %inc.117 + %237 = load i32, i32* %arrayidx1.118, align 4 + %mul.118 = mul nsw i32 %237, %236 + %arrayidx2.118 = getelementptr inbounds i32, i32* %a, i32 %inc.117 + store i32 %mul.118, i32* %arrayidx2.118, align 4 + %inc.118 = or i32 %i.08, 119 + %arrayidx.119 = getelementptr inbounds i32, i32* %b, i32 %inc.118 + %238 = load i32, i32* %arrayidx.119, align 4 + %arrayidx1.119 = getelementptr inbounds i32, i32* %c, i32 %inc.118 + %239 = load i32, i32* %arrayidx1.119, align 4 + %mul.119 = mul nsw i32 %239, %238 + %arrayidx2.119 = getelementptr inbounds i32, i32* %a, i32 %inc.118 + store i32 %mul.119, i32* %arrayidx2.119, align 4 + %inc.119 = or i32 %i.08, 120 + %arrayidx.120 = getelementptr inbounds i32, i32* %b, i32 %inc.119 + %240 = load i32, i32* %arrayidx.120, align 4 + %arrayidx1.120 = getelementptr inbounds i32, i32* %c, i32 %inc.119 + %241 
= load i32, i32* %arrayidx1.120, align 4 + %mul.120 = mul nsw i32 %241, %240 + %arrayidx2.120 = getelementptr inbounds i32, i32* %a, i32 %inc.119 + store i32 %mul.120, i32* %arrayidx2.120, align 4 + %inc.120 = or i32 %i.08, 121 + %arrayidx.121 = getelementptr inbounds i32, i32* %b, i32 %inc.120 + %242 = load i32, i32* %arrayidx.121, align 4 + %arrayidx1.121 = getelementptr inbounds i32, i32* %c, i32 %inc.120 + %243 = load i32, i32* %arrayidx1.121, align 4 + %mul.121 = mul nsw i32 %243, %242 + %arrayidx2.121 = getelementptr inbounds i32, i32* %a, i32 %inc.120 + store i32 %mul.121, i32* %arrayidx2.121, align 4 + %inc.121 = or i32 %i.08, 122 + %arrayidx.122 = getelementptr inbounds i32, i32* %b, i32 %inc.121 + %244 = load i32, i32* %arrayidx.122, align 4 + %arrayidx1.122 = getelementptr inbounds i32, i32* %c, i32 %inc.121 + %245 = load i32, i32* %arrayidx1.122, align 4 + %mul.122 = mul nsw i32 %245, %244 + %arrayidx2.122 = getelementptr inbounds i32, i32* %a, i32 %inc.121 + store i32 %mul.122, i32* %arrayidx2.122, align 4 + %inc.122 = or i32 %i.08, 123 + %arrayidx.123 = getelementptr inbounds i32, i32* %b, i32 %inc.122 + %246 = load i32, i32* %arrayidx.123, align 4 + %arrayidx1.123 = getelementptr inbounds i32, i32* %c, i32 %inc.122 + %247 = load i32, i32* %arrayidx1.123, align 4 + %mul.123 = mul nsw i32 %247, %246 + %arrayidx2.123 = getelementptr inbounds i32, i32* %a, i32 %inc.122 + store i32 %mul.123, i32* %arrayidx2.123, align 4 + %inc.123 = or i32 %i.08, 124 + %arrayidx.124 = getelementptr inbounds i32, i32* %b, i32 %inc.123 + %248 = load i32, i32* %arrayidx.124, align 4 + %arrayidx1.124 = getelementptr inbounds i32, i32* %c, i32 %inc.123 + %249 = load i32, i32* %arrayidx1.124, align 4 + %mul.124 = mul nsw i32 %249, %248 + %arrayidx2.124 = getelementptr inbounds i32, i32* %a, i32 %inc.123 + store i32 %mul.124, i32* %arrayidx2.124, align 4 + %inc.124 = or i32 %i.08, 125 + %arrayidx.125 = getelementptr inbounds i32, i32* %b, i32 %inc.124 + %250 = load i32, i32* %arrayidx.125, align 4 + %arrayidx1.125 = getelementptr inbounds i32, i32* %c, i32 %inc.124 + %251 = load i32, i32* %arrayidx1.125, align 4 + %mul.125 = mul nsw i32 %251, %250 + %arrayidx2.125 = getelementptr inbounds i32, i32* %a, i32 %inc.124 + store i32 %mul.125, i32* %arrayidx2.125, align 4 + %inc.125 = or i32 %i.08, 126 + %arrayidx.126 = getelementptr inbounds i32, i32* %b, i32 %inc.125 + %252 = load i32, i32* %arrayidx.126, align 4 + %arrayidx1.126 = getelementptr inbounds i32, i32* %c, i32 %inc.125 + %253 = load i32, i32* %arrayidx1.126, align 4 + %mul.126 = mul nsw i32 %253, %252 + %arrayidx2.126 = getelementptr inbounds i32, i32* %a, i32 %inc.125 + store i32 %mul.126, i32* %arrayidx2.126, align 4 + %inc.126 = or i32 %i.08, 127 + %arrayidx.127 = getelementptr inbounds i32, i32* %b, i32 %inc.126 + %254 = load i32, i32* %arrayidx.127, align 4 + %arrayidx1.127 = getelementptr inbounds i32, i32* %c, i32 %inc.126 + %255 = load i32, i32* %arrayidx1.127, align 4 + %mul.127 = mul nsw i32 %255, %254 + %arrayidx2.127 = getelementptr inbounds i32, i32* %a, i32 %inc.126 + store i32 %mul.127, i32* %arrayidx2.127, align 4 + %inc.127 = or i32 %i.08, 128 + %arrayidx.128 = getelementptr inbounds i32, i32* %b, i32 %inc.127 + %256 = load i32, i32* %arrayidx.128, align 4 + %arrayidx1.128 = getelementptr inbounds i32, i32* %c, i32 %inc.127 + %257 = load i32, i32* %arrayidx1.128, align 4 + %mul.128 = mul nsw i32 %257, %256 + %arrayidx2.128 = getelementptr inbounds i32, i32* %a, i32 %inc.127 + store i32 %mul.128, i32* %arrayidx2.128, align 4 + 
%inc.128 = or i32 %i.08, 129 + %arrayidx.129 = getelementptr inbounds i32, i32* %b, i32 %inc.128 + %258 = load i32, i32* %arrayidx.129, align 4 + %arrayidx1.129 = getelementptr inbounds i32, i32* %c, i32 %inc.128 + %259 = load i32, i32* %arrayidx1.129, align 4 + %mul.129 = mul nsw i32 %259, %258 + %arrayidx2.129 = getelementptr inbounds i32, i32* %a, i32 %inc.128 + store i32 %mul.129, i32* %arrayidx2.129, align 4 + %inc.129 = or i32 %i.08, 130 + %arrayidx.130 = getelementptr inbounds i32, i32* %b, i32 %inc.129 + %260 = load i32, i32* %arrayidx.130, align 4 + %arrayidx1.130 = getelementptr inbounds i32, i32* %c, i32 %inc.129 + %261 = load i32, i32* %arrayidx1.130, align 4 + %mul.130 = mul nsw i32 %261, %260 + %arrayidx2.130 = getelementptr inbounds i32, i32* %a, i32 %inc.129 + store i32 %mul.130, i32* %arrayidx2.130, align 4 + %inc.130 = or i32 %i.08, 131 + %arrayidx.131 = getelementptr inbounds i32, i32* %b, i32 %inc.130 + %262 = load i32, i32* %arrayidx.131, align 4 + %arrayidx1.131 = getelementptr inbounds i32, i32* %c, i32 %inc.130 + %263 = load i32, i32* %arrayidx1.131, align 4 + %mul.131 = mul nsw i32 %263, %262 + %arrayidx2.131 = getelementptr inbounds i32, i32* %a, i32 %inc.130 + store i32 %mul.131, i32* %arrayidx2.131, align 4 + %inc.131 = or i32 %i.08, 132 + %arrayidx.132 = getelementptr inbounds i32, i32* %b, i32 %inc.131 + %264 = load i32, i32* %arrayidx.132, align 4 + %arrayidx1.132 = getelementptr inbounds i32, i32* %c, i32 %inc.131 + %265 = load i32, i32* %arrayidx1.132, align 4 + %mul.132 = mul nsw i32 %265, %264 + %arrayidx2.132 = getelementptr inbounds i32, i32* %a, i32 %inc.131 + store i32 %mul.132, i32* %arrayidx2.132, align 4 + %inc.132 = or i32 %i.08, 133 + %arrayidx.133 = getelementptr inbounds i32, i32* %b, i32 %inc.132 + %266 = load i32, i32* %arrayidx.133, align 4 + %arrayidx1.133 = getelementptr inbounds i32, i32* %c, i32 %inc.132 + %267 = load i32, i32* %arrayidx1.133, align 4 + %mul.133 = mul nsw i32 %267, %266 + %arrayidx2.133 = getelementptr inbounds i32, i32* %a, i32 %inc.132 + store i32 %mul.133, i32* %arrayidx2.133, align 4 + %inc.133 = or i32 %i.08, 134 + %arrayidx.134 = getelementptr inbounds i32, i32* %b, i32 %inc.133 + %268 = load i32, i32* %arrayidx.134, align 4 + %arrayidx1.134 = getelementptr inbounds i32, i32* %c, i32 %inc.133 + %269 = load i32, i32* %arrayidx1.134, align 4 + %mul.134 = mul nsw i32 %269, %268 + %arrayidx2.134 = getelementptr inbounds i32, i32* %a, i32 %inc.133 + store i32 %mul.134, i32* %arrayidx2.134, align 4 + %inc.134 = or i32 %i.08, 135 + %arrayidx.135 = getelementptr inbounds i32, i32* %b, i32 %inc.134 + %270 = load i32, i32* %arrayidx.135, align 4 + %arrayidx1.135 = getelementptr inbounds i32, i32* %c, i32 %inc.134 + %271 = load i32, i32* %arrayidx1.135, align 4 + %mul.135 = mul nsw i32 %271, %270 + %arrayidx2.135 = getelementptr inbounds i32, i32* %a, i32 %inc.134 + store i32 %mul.135, i32* %arrayidx2.135, align 4 + %inc.135 = or i32 %i.08, 136 + %arrayidx.136 = getelementptr inbounds i32, i32* %b, i32 %inc.135 + %272 = load i32, i32* %arrayidx.136, align 4 + %arrayidx1.136 = getelementptr inbounds i32, i32* %c, i32 %inc.135 + %273 = load i32, i32* %arrayidx1.136, align 4 + %mul.136 = mul nsw i32 %273, %272 + %arrayidx2.136 = getelementptr inbounds i32, i32* %a, i32 %inc.135 + store i32 %mul.136, i32* %arrayidx2.136, align 4 + %inc.136 = or i32 %i.08, 137 + %arrayidx.137 = getelementptr inbounds i32, i32* %b, i32 %inc.136 + %274 = load i32, i32* %arrayidx.137, align 4 + %arrayidx1.137 = getelementptr inbounds i32, i32* %c, 
i32 %inc.136 + %275 = load i32, i32* %arrayidx1.137, align 4 + %mul.137 = mul nsw i32 %275, %274 + %arrayidx2.137 = getelementptr inbounds i32, i32* %a, i32 %inc.136 + store i32 %mul.137, i32* %arrayidx2.137, align 4 + %inc.137 = or i32 %i.08, 138 + %arrayidx.138 = getelementptr inbounds i32, i32* %b, i32 %inc.137 + %276 = load i32, i32* %arrayidx.138, align 4 + %arrayidx1.138 = getelementptr inbounds i32, i32* %c, i32 %inc.137 + %277 = load i32, i32* %arrayidx1.138, align 4 + %mul.138 = mul nsw i32 %277, %276 + %arrayidx2.138 = getelementptr inbounds i32, i32* %a, i32 %inc.137 + store i32 %mul.138, i32* %arrayidx2.138, align 4 + %inc.138 = or i32 %i.08, 139 + %arrayidx.139 = getelementptr inbounds i32, i32* %b, i32 %inc.138 + %278 = load i32, i32* %arrayidx.139, align 4 + %arrayidx1.139 = getelementptr inbounds i32, i32* %c, i32 %inc.138 + %279 = load i32, i32* %arrayidx1.139, align 4 + %mul.139 = mul nsw i32 %279, %278 + %arrayidx2.139 = getelementptr inbounds i32, i32* %a, i32 %inc.138 + store i32 %mul.139, i32* %arrayidx2.139, align 4 + %inc.139 = or i32 %i.08, 140 + %arrayidx.140 = getelementptr inbounds i32, i32* %b, i32 %inc.139 + %280 = load i32, i32* %arrayidx.140, align 4 + %arrayidx1.140 = getelementptr inbounds i32, i32* %c, i32 %inc.139 + %281 = load i32, i32* %arrayidx1.140, align 4 + %mul.140 = mul nsw i32 %281, %280 + %arrayidx2.140 = getelementptr inbounds i32, i32* %a, i32 %inc.139 + store i32 %mul.140, i32* %arrayidx2.140, align 4 + %inc.140 = or i32 %i.08, 141 + %arrayidx.141 = getelementptr inbounds i32, i32* %b, i32 %inc.140 + %282 = load i32, i32* %arrayidx.141, align 4 + %arrayidx1.141 = getelementptr inbounds i32, i32* %c, i32 %inc.140 + %283 = load i32, i32* %arrayidx1.141, align 4 + %mul.141 = mul nsw i32 %283, %282 + %arrayidx2.141 = getelementptr inbounds i32, i32* %a, i32 %inc.140 + store i32 %mul.141, i32* %arrayidx2.141, align 4 + %inc.141 = or i32 %i.08, 142 + %arrayidx.142 = getelementptr inbounds i32, i32* %b, i32 %inc.141 + %284 = load i32, i32* %arrayidx.142, align 4 + %arrayidx1.142 = getelementptr inbounds i32, i32* %c, i32 %inc.141 + %285 = load i32, i32* %arrayidx1.142, align 4 + %mul.142 = mul nsw i32 %285, %284 + %arrayidx2.142 = getelementptr inbounds i32, i32* %a, i32 %inc.141 + store i32 %mul.142, i32* %arrayidx2.142, align 4 + %inc.142 = or i32 %i.08, 143 + %arrayidx.143 = getelementptr inbounds i32, i32* %b, i32 %inc.142 + %286 = load i32, i32* %arrayidx.143, align 4 + %arrayidx1.143 = getelementptr inbounds i32, i32* %c, i32 %inc.142 + %287 = load i32, i32* %arrayidx1.143, align 4 + %mul.143 = mul nsw i32 %287, %286 + %arrayidx2.143 = getelementptr inbounds i32, i32* %a, i32 %inc.142 + store i32 %mul.143, i32* %arrayidx2.143, align 4 + %inc.143 = or i32 %i.08, 144 + %arrayidx.144 = getelementptr inbounds i32, i32* %b, i32 %inc.143 + %288 = load i32, i32* %arrayidx.144, align 4 + %arrayidx1.144 = getelementptr inbounds i32, i32* %c, i32 %inc.143 + %289 = load i32, i32* %arrayidx1.144, align 4 + %mul.144 = mul nsw i32 %289, %288 + %arrayidx2.144 = getelementptr inbounds i32, i32* %a, i32 %inc.143 + store i32 %mul.144, i32* %arrayidx2.144, align 4 + %inc.144 = or i32 %i.08, 145 + %arrayidx.145 = getelementptr inbounds i32, i32* %b, i32 %inc.144 + %290 = load i32, i32* %arrayidx.145, align 4 + %arrayidx1.145 = getelementptr inbounds i32, i32* %c, i32 %inc.144 + %291 = load i32, i32* %arrayidx1.145, align 4 + %mul.145 = mul nsw i32 %291, %290 + %arrayidx2.145 = getelementptr inbounds i32, i32* %a, i32 %inc.144 + store i32 %mul.145, i32* 
%arrayidx2.145, align 4 + %inc.145 = or i32 %i.08, 146 + %arrayidx.146 = getelementptr inbounds i32, i32* %b, i32 %inc.145 + %292 = load i32, i32* %arrayidx.146, align 4 + %arrayidx1.146 = getelementptr inbounds i32, i32* %c, i32 %inc.145 + %293 = load i32, i32* %arrayidx1.146, align 4 + %mul.146 = mul nsw i32 %293, %292 + %arrayidx2.146 = getelementptr inbounds i32, i32* %a, i32 %inc.145 + store i32 %mul.146, i32* %arrayidx2.146, align 4 + %inc.146 = or i32 %i.08, 147 + %arrayidx.147 = getelementptr inbounds i32, i32* %b, i32 %inc.146 + %294 = load i32, i32* %arrayidx.147, align 4 + %arrayidx1.147 = getelementptr inbounds i32, i32* %c, i32 %inc.146 + %295 = load i32, i32* %arrayidx1.147, align 4 + %mul.147 = mul nsw i32 %295, %294 + %arrayidx2.147 = getelementptr inbounds i32, i32* %a, i32 %inc.146 + store i32 %mul.147, i32* %arrayidx2.147, align 4 + %inc.147 = or i32 %i.08, 148 + %arrayidx.148 = getelementptr inbounds i32, i32* %b, i32 %inc.147 + %296 = load i32, i32* %arrayidx.148, align 4 + %arrayidx1.148 = getelementptr inbounds i32, i32* %c, i32 %inc.147 + %297 = load i32, i32* %arrayidx1.148, align 4 + %mul.148 = mul nsw i32 %297, %296 + %arrayidx2.148 = getelementptr inbounds i32, i32* %a, i32 %inc.147 + store i32 %mul.148, i32* %arrayidx2.148, align 4 + %inc.148 = or i32 %i.08, 149 + %arrayidx.149 = getelementptr inbounds i32, i32* %b, i32 %inc.148 + %298 = load i32, i32* %arrayidx.149, align 4 + %arrayidx1.149 = getelementptr inbounds i32, i32* %c, i32 %inc.148 + %299 = load i32, i32* %arrayidx1.149, align 4 + %mul.149 = mul nsw i32 %299, %298 + %arrayidx2.149 = getelementptr inbounds i32, i32* %a, i32 %inc.148 + store i32 %mul.149, i32* %arrayidx2.149, align 4 + %inc.149 = or i32 %i.08, 150 + %arrayidx.150 = getelementptr inbounds i32, i32* %b, i32 %inc.149 + %300 = load i32, i32* %arrayidx.150, align 4 + %arrayidx1.150 = getelementptr inbounds i32, i32* %c, i32 %inc.149 + %301 = load i32, i32* %arrayidx1.150, align 4 + %mul.150 = mul nsw i32 %301, %300 + %arrayidx2.150 = getelementptr inbounds i32, i32* %a, i32 %inc.149 + store i32 %mul.150, i32* %arrayidx2.150, align 4 + %inc.150 = or i32 %i.08, 151 + %arrayidx.151 = getelementptr inbounds i32, i32* %b, i32 %inc.150 + %302 = load i32, i32* %arrayidx.151, align 4 + %arrayidx1.151 = getelementptr inbounds i32, i32* %c, i32 %inc.150 + %303 = load i32, i32* %arrayidx1.151, align 4 + %mul.151 = mul nsw i32 %303, %302 + %arrayidx2.151 = getelementptr inbounds i32, i32* %a, i32 %inc.150 + store i32 %mul.151, i32* %arrayidx2.151, align 4 + %inc.151 = or i32 %i.08, 152 + %arrayidx.152 = getelementptr inbounds i32, i32* %b, i32 %inc.151 + %304 = load i32, i32* %arrayidx.152, align 4 + %arrayidx1.152 = getelementptr inbounds i32, i32* %c, i32 %inc.151 + %305 = load i32, i32* %arrayidx1.152, align 4 + %mul.152 = mul nsw i32 %305, %304 + %arrayidx2.152 = getelementptr inbounds i32, i32* %a, i32 %inc.151 + store i32 %mul.152, i32* %arrayidx2.152, align 4 + %inc.152 = or i32 %i.08, 153 + %arrayidx.153 = getelementptr inbounds i32, i32* %b, i32 %inc.152 + %306 = load i32, i32* %arrayidx.153, align 4 + %arrayidx1.153 = getelementptr inbounds i32, i32* %c, i32 %inc.152 + %307 = load i32, i32* %arrayidx1.153, align 4 + %mul.153 = mul nsw i32 %307, %306 + %arrayidx2.153 = getelementptr inbounds i32, i32* %a, i32 %inc.152 + store i32 %mul.153, i32* %arrayidx2.153, align 4 + %inc.153 = or i32 %i.08, 154 + %arrayidx.154 = getelementptr inbounds i32, i32* %b, i32 %inc.153 + %308 = load i32, i32* %arrayidx.154, align 4 + %arrayidx1.154 = getelementptr 
inbounds i32, i32* %c, i32 %inc.153 + %309 = load i32, i32* %arrayidx1.154, align 4 + %mul.154 = mul nsw i32 %309, %308 + %arrayidx2.154 = getelementptr inbounds i32, i32* %a, i32 %inc.153 + store i32 %mul.154, i32* %arrayidx2.154, align 4 + %inc.154 = or i32 %i.08, 155 + %arrayidx.155 = getelementptr inbounds i32, i32* %b, i32 %inc.154 + %310 = load i32, i32* %arrayidx.155, align 4 + %arrayidx1.155 = getelementptr inbounds i32, i32* %c, i32 %inc.154 + %311 = load i32, i32* %arrayidx1.155, align 4 + %mul.155 = mul nsw i32 %311, %310 + %arrayidx2.155 = getelementptr inbounds i32, i32* %a, i32 %inc.154 + store i32 %mul.155, i32* %arrayidx2.155, align 4 + %inc.155 = or i32 %i.08, 156 + %arrayidx.156 = getelementptr inbounds i32, i32* %b, i32 %inc.155 + %312 = load i32, i32* %arrayidx.156, align 4 + %arrayidx1.156 = getelementptr inbounds i32, i32* %c, i32 %inc.155 + %313 = load i32, i32* %arrayidx1.156, align 4 + %mul.156 = mul nsw i32 %313, %312 + %arrayidx2.156 = getelementptr inbounds i32, i32* %a, i32 %inc.155 + store i32 %mul.156, i32* %arrayidx2.156, align 4 + %inc.156 = or i32 %i.08, 157 + %arrayidx.157 = getelementptr inbounds i32, i32* %b, i32 %inc.156 + %314 = load i32, i32* %arrayidx.157, align 4 + %arrayidx1.157 = getelementptr inbounds i32, i32* %c, i32 %inc.156 + %315 = load i32, i32* %arrayidx1.157, align 4 + %mul.157 = mul nsw i32 %315, %314 + %arrayidx2.157 = getelementptr inbounds i32, i32* %a, i32 %inc.156 + store i32 %mul.157, i32* %arrayidx2.157, align 4 + %inc.157 = or i32 %i.08, 158 + %arrayidx.158 = getelementptr inbounds i32, i32* %b, i32 %inc.157 + %316 = load i32, i32* %arrayidx.158, align 4 + %arrayidx1.158 = getelementptr inbounds i32, i32* %c, i32 %inc.157 + %317 = load i32, i32* %arrayidx1.158, align 4 + %mul.158 = mul nsw i32 %317, %316 + %arrayidx2.158 = getelementptr inbounds i32, i32* %a, i32 %inc.157 + store i32 %mul.158, i32* %arrayidx2.158, align 4 + %inc.158 = or i32 %i.08, 159 + %arrayidx.159 = getelementptr inbounds i32, i32* %b, i32 %inc.158 + %318 = load i32, i32* %arrayidx.159, align 4 + %arrayidx1.159 = getelementptr inbounds i32, i32* %c, i32 %inc.158 + %319 = load i32, i32* %arrayidx1.159, align 4 + %mul.159 = mul nsw i32 %319, %318 + %arrayidx2.159 = getelementptr inbounds i32, i32* %a, i32 %inc.158 + store i32 %mul.159, i32* %arrayidx2.159, align 4 + %inc.159 = or i32 %i.08, 160 + %arrayidx.160 = getelementptr inbounds i32, i32* %b, i32 %inc.159 + %320 = load i32, i32* %arrayidx.160, align 4 + %arrayidx1.160 = getelementptr inbounds i32, i32* %c, i32 %inc.159 + %321 = load i32, i32* %arrayidx1.160, align 4 + %mul.160 = mul nsw i32 %321, %320 + %arrayidx2.160 = getelementptr inbounds i32, i32* %a, i32 %inc.159 + store i32 %mul.160, i32* %arrayidx2.160, align 4 + %inc.160 = or i32 %i.08, 161 + %arrayidx.161 = getelementptr inbounds i32, i32* %b, i32 %inc.160 + %322 = load i32, i32* %arrayidx.161, align 4 + %arrayidx1.161 = getelementptr inbounds i32, i32* %c, i32 %inc.160 + %323 = load i32, i32* %arrayidx1.161, align 4 + %mul.161 = mul nsw i32 %323, %322 + %arrayidx2.161 = getelementptr inbounds i32, i32* %a, i32 %inc.160 + store i32 %mul.161, i32* %arrayidx2.161, align 4 + %inc.161 = or i32 %i.08, 162 + %arrayidx.162 = getelementptr inbounds i32, i32* %b, i32 %inc.161 + %324 = load i32, i32* %arrayidx.162, align 4 + %arrayidx1.162 = getelementptr inbounds i32, i32* %c, i32 %inc.161 + %325 = load i32, i32* %arrayidx1.162, align 4 + %mul.162 = mul nsw i32 %325, %324 + %arrayidx2.162 = getelementptr inbounds i32, i32* %a, i32 %inc.161 + store i32 
%mul.162, i32* %arrayidx2.162, align 4 + %inc.162 = or i32 %i.08, 163 + %arrayidx.163 = getelementptr inbounds i32, i32* %b, i32 %inc.162 + %326 = load i32, i32* %arrayidx.163, align 4 + %arrayidx1.163 = getelementptr inbounds i32, i32* %c, i32 %inc.162 + %327 = load i32, i32* %arrayidx1.163, align 4 + %mul.163 = mul nsw i32 %327, %326 + %arrayidx2.163 = getelementptr inbounds i32, i32* %a, i32 %inc.162 + store i32 %mul.163, i32* %arrayidx2.163, align 4 + %inc.163 = or i32 %i.08, 164 + %arrayidx.164 = getelementptr inbounds i32, i32* %b, i32 %inc.163 + %328 = load i32, i32* %arrayidx.164, align 4 + %arrayidx1.164 = getelementptr inbounds i32, i32* %c, i32 %inc.163 + %329 = load i32, i32* %arrayidx1.164, align 4 + %mul.164 = mul nsw i32 %329, %328 + %arrayidx2.164 = getelementptr inbounds i32, i32* %a, i32 %inc.163 + store i32 %mul.164, i32* %arrayidx2.164, align 4 + %inc.164 = or i32 %i.08, 165 + %arrayidx.165 = getelementptr inbounds i32, i32* %b, i32 %inc.164 + %330 = load i32, i32* %arrayidx.165, align 4 + %arrayidx1.165 = getelementptr inbounds i32, i32* %c, i32 %inc.164 + %331 = load i32, i32* %arrayidx1.165, align 4 + %mul.165 = mul nsw i32 %331, %330 + %arrayidx2.165 = getelementptr inbounds i32, i32* %a, i32 %inc.164 + store i32 %mul.165, i32* %arrayidx2.165, align 4 + %inc.165 = or i32 %i.08, 166 + %arrayidx.166 = getelementptr inbounds i32, i32* %b, i32 %inc.165 + %332 = load i32, i32* %arrayidx.166, align 4 + %arrayidx1.166 = getelementptr inbounds i32, i32* %c, i32 %inc.165 + %333 = load i32, i32* %arrayidx1.166, align 4 + %mul.166 = mul nsw i32 %333, %332 + %arrayidx2.166 = getelementptr inbounds i32, i32* %a, i32 %inc.165 + store i32 %mul.166, i32* %arrayidx2.166, align 4 + %inc.166 = or i32 %i.08, 167 + %arrayidx.167 = getelementptr inbounds i32, i32* %b, i32 %inc.166 + %334 = load i32, i32* %arrayidx.167, align 4 + %arrayidx1.167 = getelementptr inbounds i32, i32* %c, i32 %inc.166 + %335 = load i32, i32* %arrayidx1.167, align 4 + %mul.167 = mul nsw i32 %335, %334 + %arrayidx2.167 = getelementptr inbounds i32, i32* %a, i32 %inc.166 + store i32 %mul.167, i32* %arrayidx2.167, align 4 + %inc.167 = or i32 %i.08, 168 + %arrayidx.168 = getelementptr inbounds i32, i32* %b, i32 %inc.167 + %336 = load i32, i32* %arrayidx.168, align 4 + %arrayidx1.168 = getelementptr inbounds i32, i32* %c, i32 %inc.167 + %337 = load i32, i32* %arrayidx1.168, align 4 + %mul.168 = mul nsw i32 %337, %336 + %arrayidx2.168 = getelementptr inbounds i32, i32* %a, i32 %inc.167 + store i32 %mul.168, i32* %arrayidx2.168, align 4 + %inc.168 = or i32 %i.08, 169 + %arrayidx.169 = getelementptr inbounds i32, i32* %b, i32 %inc.168 + %338 = load i32, i32* %arrayidx.169, align 4 + %arrayidx1.169 = getelementptr inbounds i32, i32* %c, i32 %inc.168 + %339 = load i32, i32* %arrayidx1.169, align 4 + %mul.169 = mul nsw i32 %339, %338 + %arrayidx2.169 = getelementptr inbounds i32, i32* %a, i32 %inc.168 + store i32 %mul.169, i32* %arrayidx2.169, align 4 + %inc.169 = or i32 %i.08, 170 + %arrayidx.170 = getelementptr inbounds i32, i32* %b, i32 %inc.169 + %340 = load i32, i32* %arrayidx.170, align 4 + %arrayidx1.170 = getelementptr inbounds i32, i32* %c, i32 %inc.169 + %341 = load i32, i32* %arrayidx1.170, align 4 + %mul.170 = mul nsw i32 %341, %340 + %arrayidx2.170 = getelementptr inbounds i32, i32* %a, i32 %inc.169 + store i32 %mul.170, i32* %arrayidx2.170, align 4 + %inc.170 = or i32 %i.08, 171 + %arrayidx.171 = getelementptr inbounds i32, i32* %b, i32 %inc.170 + %342 = load i32, i32* %arrayidx.171, align 4 + %arrayidx1.171 
= getelementptr inbounds i32, i32* %c, i32 %inc.170 + %343 = load i32, i32* %arrayidx1.171, align 4 + %mul.171 = mul nsw i32 %343, %342 + %arrayidx2.171 = getelementptr inbounds i32, i32* %a, i32 %inc.170 + store i32 %mul.171, i32* %arrayidx2.171, align 4 + %inc.171 = or i32 %i.08, 172 + %arrayidx.172 = getelementptr inbounds i32, i32* %b, i32 %inc.171 + %344 = load i32, i32* %arrayidx.172, align 4 + %arrayidx1.172 = getelementptr inbounds i32, i32* %c, i32 %inc.171 + %345 = load i32, i32* %arrayidx1.172, align 4 + %mul.172 = mul nsw i32 %345, %344 + %arrayidx2.172 = getelementptr inbounds i32, i32* %a, i32 %inc.171 + store i32 %mul.172, i32* %arrayidx2.172, align 4 + %inc.172 = or i32 %i.08, 173 + %arrayidx.173 = getelementptr inbounds i32, i32* %b, i32 %inc.172 + %346 = load i32, i32* %arrayidx.173, align 4 + %arrayidx1.173 = getelementptr inbounds i32, i32* %c, i32 %inc.172 + %347 = load i32, i32* %arrayidx1.173, align 4 + %mul.173 = mul nsw i32 %347, %346 + %arrayidx2.173 = getelementptr inbounds i32, i32* %a, i32 %inc.172 + store i32 %mul.173, i32* %arrayidx2.173, align 4 + %inc.173 = or i32 %i.08, 174 + %arrayidx.174 = getelementptr inbounds i32, i32* %b, i32 %inc.173 + %348 = load i32, i32* %arrayidx.174, align 4 + %arrayidx1.174 = getelementptr inbounds i32, i32* %c, i32 %inc.173 + %349 = load i32, i32* %arrayidx1.174, align 4 + %mul.174 = mul nsw i32 %349, %348 + %arrayidx2.174 = getelementptr inbounds i32, i32* %a, i32 %inc.173 + store i32 %mul.174, i32* %arrayidx2.174, align 4 + %inc.174 = or i32 %i.08, 175 + %arrayidx.175 = getelementptr inbounds i32, i32* %b, i32 %inc.174 + %350 = load i32, i32* %arrayidx.175, align 4 + %arrayidx1.175 = getelementptr inbounds i32, i32* %c, i32 %inc.174 + %351 = load i32, i32* %arrayidx1.175, align 4 + %mul.175 = mul nsw i32 %351, %350 + %arrayidx2.175 = getelementptr inbounds i32, i32* %a, i32 %inc.174 + store i32 %mul.175, i32* %arrayidx2.175, align 4 + %inc.175 = or i32 %i.08, 176 + %arrayidx.176 = getelementptr inbounds i32, i32* %b, i32 %inc.175 + %352 = load i32, i32* %arrayidx.176, align 4 + %arrayidx1.176 = getelementptr inbounds i32, i32* %c, i32 %inc.175 + %353 = load i32, i32* %arrayidx1.176, align 4 + %mul.176 = mul nsw i32 %353, %352 + %arrayidx2.176 = getelementptr inbounds i32, i32* %a, i32 %inc.175 + store i32 %mul.176, i32* %arrayidx2.176, align 4 + %inc.176 = or i32 %i.08, 177 + %arrayidx.177 = getelementptr inbounds i32, i32* %b, i32 %inc.176 + %354 = load i32, i32* %arrayidx.177, align 4 + %arrayidx1.177 = getelementptr inbounds i32, i32* %c, i32 %inc.176 + %355 = load i32, i32* %arrayidx1.177, align 4 + %mul.177 = mul nsw i32 %355, %354 + %arrayidx2.177 = getelementptr inbounds i32, i32* %a, i32 %inc.176 + store i32 %mul.177, i32* %arrayidx2.177, align 4 + %inc.177 = or i32 %i.08, 178 + %arrayidx.178 = getelementptr inbounds i32, i32* %b, i32 %inc.177 + %356 = load i32, i32* %arrayidx.178, align 4 + %arrayidx1.178 = getelementptr inbounds i32, i32* %c, i32 %inc.177 + %357 = load i32, i32* %arrayidx1.178, align 4 + %mul.178 = mul nsw i32 %357, %356 + %arrayidx2.178 = getelementptr inbounds i32, i32* %a, i32 %inc.177 + store i32 %mul.178, i32* %arrayidx2.178, align 4 + %inc.178 = or i32 %i.08, 179 + %arrayidx.179 = getelementptr inbounds i32, i32* %b, i32 %inc.178 + %358 = load i32, i32* %arrayidx.179, align 4 + %arrayidx1.179 = getelementptr inbounds i32, i32* %c, i32 %inc.178 + %359 = load i32, i32* %arrayidx1.179, align 4 + %mul.179 = mul nsw i32 %359, %358 + %arrayidx2.179 = getelementptr inbounds i32, i32* %a, i32 
%inc.178 + store i32 %mul.179, i32* %arrayidx2.179, align 4 + %inc.179 = or i32 %i.08, 180 + %arrayidx.180 = getelementptr inbounds i32, i32* %b, i32 %inc.179 + %360 = load i32, i32* %arrayidx.180, align 4 + %arrayidx1.180 = getelementptr inbounds i32, i32* %c, i32 %inc.179 + %361 = load i32, i32* %arrayidx1.180, align 4 + %mul.180 = mul nsw i32 %361, %360 + %arrayidx2.180 = getelementptr inbounds i32, i32* %a, i32 %inc.179 + store i32 %mul.180, i32* %arrayidx2.180, align 4 + %inc.180 = or i32 %i.08, 181 + %arrayidx.181 = getelementptr inbounds i32, i32* %b, i32 %inc.180 + %362 = load i32, i32* %arrayidx.181, align 4 + %arrayidx1.181 = getelementptr inbounds i32, i32* %c, i32 %inc.180 + %363 = load i32, i32* %arrayidx1.181, align 4 + %mul.181 = mul nsw i32 %363, %362 + %arrayidx2.181 = getelementptr inbounds i32, i32* %a, i32 %inc.180 + store i32 %mul.181, i32* %arrayidx2.181, align 4 + %inc.181 = or i32 %i.08, 182 + %arrayidx.182 = getelementptr inbounds i32, i32* %b, i32 %inc.181 + %364 = load i32, i32* %arrayidx.182, align 4 + %arrayidx1.182 = getelementptr inbounds i32, i32* %c, i32 %inc.181 + %365 = load i32, i32* %arrayidx1.182, align 4 + %mul.182 = mul nsw i32 %365, %364 + %arrayidx2.182 = getelementptr inbounds i32, i32* %a, i32 %inc.181 + store i32 %mul.182, i32* %arrayidx2.182, align 4 + %inc.182 = or i32 %i.08, 183 + %arrayidx.183 = getelementptr inbounds i32, i32* %b, i32 %inc.182 + %366 = load i32, i32* %arrayidx.183, align 4 + %arrayidx1.183 = getelementptr inbounds i32, i32* %c, i32 %inc.182 + %367 = load i32, i32* %arrayidx1.183, align 4 + %mul.183 = mul nsw i32 %367, %366 + %arrayidx2.183 = getelementptr inbounds i32, i32* %a, i32 %inc.182 + store i32 %mul.183, i32* %arrayidx2.183, align 4 + %inc.183 = or i32 %i.08, 184 + %arrayidx.184 = getelementptr inbounds i32, i32* %b, i32 %inc.183 + %368 = load i32, i32* %arrayidx.184, align 4 + %arrayidx1.184 = getelementptr inbounds i32, i32* %c, i32 %inc.183 + %369 = load i32, i32* %arrayidx1.184, align 4 + %mul.184 = mul nsw i32 %369, %368 + %arrayidx2.184 = getelementptr inbounds i32, i32* %a, i32 %inc.183 + store i32 %mul.184, i32* %arrayidx2.184, align 4 + %inc.184 = or i32 %i.08, 185 + %arrayidx.185 = getelementptr inbounds i32, i32* %b, i32 %inc.184 + %370 = load i32, i32* %arrayidx.185, align 4 + %arrayidx1.185 = getelementptr inbounds i32, i32* %c, i32 %inc.184 + %371 = load i32, i32* %arrayidx1.185, align 4 + %mul.185 = mul nsw i32 %371, %370 + %arrayidx2.185 = getelementptr inbounds i32, i32* %a, i32 %inc.184 + store i32 %mul.185, i32* %arrayidx2.185, align 4 + %inc.185 = or i32 %i.08, 186 + %arrayidx.186 = getelementptr inbounds i32, i32* %b, i32 %inc.185 + %372 = load i32, i32* %arrayidx.186, align 4 + %arrayidx1.186 = getelementptr inbounds i32, i32* %c, i32 %inc.185 + %373 = load i32, i32* %arrayidx1.186, align 4 + %mul.186 = mul nsw i32 %373, %372 + %arrayidx2.186 = getelementptr inbounds i32, i32* %a, i32 %inc.185 + store i32 %mul.186, i32* %arrayidx2.186, align 4 + %inc.186 = or i32 %i.08, 187 + %arrayidx.187 = getelementptr inbounds i32, i32* %b, i32 %inc.186 + %374 = load i32, i32* %arrayidx.187, align 4 + %arrayidx1.187 = getelementptr inbounds i32, i32* %c, i32 %inc.186 + %375 = load i32, i32* %arrayidx1.187, align 4 + %mul.187 = mul nsw i32 %375, %374 + %arrayidx2.187 = getelementptr inbounds i32, i32* %a, i32 %inc.186 + store i32 %mul.187, i32* %arrayidx2.187, align 4 + %inc.187 = or i32 %i.08, 188 + %arrayidx.188 = getelementptr inbounds i32, i32* %b, i32 %inc.187 + %376 = load i32, i32* %arrayidx.188, 
align 4 + %arrayidx1.188 = getelementptr inbounds i32, i32* %c, i32 %inc.187 + %377 = load i32, i32* %arrayidx1.188, align 4 + %mul.188 = mul nsw i32 %377, %376 + %arrayidx2.188 = getelementptr inbounds i32, i32* %a, i32 %inc.187 + store i32 %mul.188, i32* %arrayidx2.188, align 4 + %inc.188 = or i32 %i.08, 189 + %arrayidx.189 = getelementptr inbounds i32, i32* %b, i32 %inc.188 + %378 = load i32, i32* %arrayidx.189, align 4 + %arrayidx1.189 = getelementptr inbounds i32, i32* %c, i32 %inc.188 + %379 = load i32, i32* %arrayidx1.189, align 4 + %mul.189 = mul nsw i32 %379, %378 + %arrayidx2.189 = getelementptr inbounds i32, i32* %a, i32 %inc.188 + store i32 %mul.189, i32* %arrayidx2.189, align 4 + %inc.189 = or i32 %i.08, 190 + %arrayidx.190 = getelementptr inbounds i32, i32* %b, i32 %inc.189 + %380 = load i32, i32* %arrayidx.190, align 4 + %arrayidx1.190 = getelementptr inbounds i32, i32* %c, i32 %inc.189 + %381 = load i32, i32* %arrayidx1.190, align 4 + %mul.190 = mul nsw i32 %381, %380 + %arrayidx2.190 = getelementptr inbounds i32, i32* %a, i32 %inc.189 + store i32 %mul.190, i32* %arrayidx2.190, align 4 + %inc.190 = or i32 %i.08, 191 + %arrayidx.191 = getelementptr inbounds i32, i32* %b, i32 %inc.190 + %382 = load i32, i32* %arrayidx.191, align 4 + %arrayidx1.191 = getelementptr inbounds i32, i32* %c, i32 %inc.190 + %383 = load i32, i32* %arrayidx1.191, align 4 + %mul.191 = mul nsw i32 %383, %382 + %arrayidx2.191 = getelementptr inbounds i32, i32* %a, i32 %inc.190 + store i32 %mul.191, i32* %arrayidx2.191, align 4 + %inc.191 = or i32 %i.08, 192 + %arrayidx.192 = getelementptr inbounds i32, i32* %b, i32 %inc.191 + %384 = load i32, i32* %arrayidx.192, align 4 + %arrayidx1.192 = getelementptr inbounds i32, i32* %c, i32 %inc.191 + %385 = load i32, i32* %arrayidx1.192, align 4 + %mul.192 = mul nsw i32 %385, %384 + %arrayidx2.192 = getelementptr inbounds i32, i32* %a, i32 %inc.191 + store i32 %mul.192, i32* %arrayidx2.192, align 4 + %inc.192 = or i32 %i.08, 193 + %arrayidx.193 = getelementptr inbounds i32, i32* %b, i32 %inc.192 + %386 = load i32, i32* %arrayidx.193, align 4 + %arrayidx1.193 = getelementptr inbounds i32, i32* %c, i32 %inc.192 + %387 = load i32, i32* %arrayidx1.193, align 4 + %mul.193 = mul nsw i32 %387, %386 + %arrayidx2.193 = getelementptr inbounds i32, i32* %a, i32 %inc.192 + store i32 %mul.193, i32* %arrayidx2.193, align 4 + %inc.193 = or i32 %i.08, 194 + %arrayidx.194 = getelementptr inbounds i32, i32* %b, i32 %inc.193 + %388 = load i32, i32* %arrayidx.194, align 4 + %arrayidx1.194 = getelementptr inbounds i32, i32* %c, i32 %inc.193 + %389 = load i32, i32* %arrayidx1.194, align 4 + %mul.194 = mul nsw i32 %389, %388 + %arrayidx2.194 = getelementptr inbounds i32, i32* %a, i32 %inc.193 + store i32 %mul.194, i32* %arrayidx2.194, align 4 + %inc.194 = or i32 %i.08, 195 + %arrayidx.195 = getelementptr inbounds i32, i32* %b, i32 %inc.194 + %390 = load i32, i32* %arrayidx.195, align 4 + %arrayidx1.195 = getelementptr inbounds i32, i32* %c, i32 %inc.194 + %391 = load i32, i32* %arrayidx1.195, align 4 + %mul.195 = mul nsw i32 %391, %390 + %arrayidx2.195 = getelementptr inbounds i32, i32* %a, i32 %inc.194 + store i32 %mul.195, i32* %arrayidx2.195, align 4 + %inc.195 = or i32 %i.08, 196 + %arrayidx.196 = getelementptr inbounds i32, i32* %b, i32 %inc.195 + %392 = load i32, i32* %arrayidx.196, align 4 + %arrayidx1.196 = getelementptr inbounds i32, i32* %c, i32 %inc.195 + %393 = load i32, i32* %arrayidx1.196, align 4 + %mul.196 = mul nsw i32 %393, %392 + %arrayidx2.196 = getelementptr inbounds 
i32, i32* %a, i32 %inc.195 + store i32 %mul.196, i32* %arrayidx2.196, align 4 + %inc.196 = or i32 %i.08, 197 + %arrayidx.197 = getelementptr inbounds i32, i32* %b, i32 %inc.196 + %394 = load i32, i32* %arrayidx.197, align 4 + %arrayidx1.197 = getelementptr inbounds i32, i32* %c, i32 %inc.196 + %395 = load i32, i32* %arrayidx1.197, align 4 + %mul.197 = mul nsw i32 %395, %394 + %arrayidx2.197 = getelementptr inbounds i32, i32* %a, i32 %inc.196 + store i32 %mul.197, i32* %arrayidx2.197, align 4 + %inc.197 = or i32 %i.08, 198 + %arrayidx.198 = getelementptr inbounds i32, i32* %b, i32 %inc.197 + %396 = load i32, i32* %arrayidx.198, align 4 + %arrayidx1.198 = getelementptr inbounds i32, i32* %c, i32 %inc.197 + %397 = load i32, i32* %arrayidx1.198, align 4 + %mul.198 = mul nsw i32 %397, %396 + %arrayidx2.198 = getelementptr inbounds i32, i32* %a, i32 %inc.197 + store i32 %mul.198, i32* %arrayidx2.198, align 4 + %inc.198 = or i32 %i.08, 199 + %arrayidx.199 = getelementptr inbounds i32, i32* %b, i32 %inc.198 + %398 = load i32, i32* %arrayidx.199, align 4 + %arrayidx1.199 = getelementptr inbounds i32, i32* %c, i32 %inc.198 + %399 = load i32, i32* %arrayidx1.199, align 4 + %mul.199 = mul nsw i32 %399, %398 + %arrayidx2.199 = getelementptr inbounds i32, i32* %a, i32 %inc.198 + store i32 %mul.199, i32* %arrayidx2.199, align 4 + %inc.199 = or i32 %i.08, 200 + %arrayidx.200 = getelementptr inbounds i32, i32* %b, i32 %inc.199 + %400 = load i32, i32* %arrayidx.200, align 4 + %arrayidx1.200 = getelementptr inbounds i32, i32* %c, i32 %inc.199 + %401 = load i32, i32* %arrayidx1.200, align 4 + %mul.200 = mul nsw i32 %401, %400 + %arrayidx2.200 = getelementptr inbounds i32, i32* %a, i32 %inc.199 + store i32 %mul.200, i32* %arrayidx2.200, align 4 + %inc.200 = or i32 %i.08, 201 + %arrayidx.201 = getelementptr inbounds i32, i32* %b, i32 %inc.200 + %402 = load i32, i32* %arrayidx.201, align 4 + %arrayidx1.201 = getelementptr inbounds i32, i32* %c, i32 %inc.200 + %403 = load i32, i32* %arrayidx1.201, align 4 + %mul.201 = mul nsw i32 %403, %402 + %arrayidx2.201 = getelementptr inbounds i32, i32* %a, i32 %inc.200 + store i32 %mul.201, i32* %arrayidx2.201, align 4 + %inc.201 = or i32 %i.08, 202 + %arrayidx.202 = getelementptr inbounds i32, i32* %b, i32 %inc.201 + %404 = load i32, i32* %arrayidx.202, align 4 + %arrayidx1.202 = getelementptr inbounds i32, i32* %c, i32 %inc.201 + %405 = load i32, i32* %arrayidx1.202, align 4 + %mul.202 = mul nsw i32 %405, %404 + %arrayidx2.202 = getelementptr inbounds i32, i32* %a, i32 %inc.201 + store i32 %mul.202, i32* %arrayidx2.202, align 4 + %inc.202 = or i32 %i.08, 203 + %arrayidx.203 = getelementptr inbounds i32, i32* %b, i32 %inc.202 + %406 = load i32, i32* %arrayidx.203, align 4 + %arrayidx1.203 = getelementptr inbounds i32, i32* %c, i32 %inc.202 + %407 = load i32, i32* %arrayidx1.203, align 4 + %mul.203 = mul nsw i32 %407, %406 + %arrayidx2.203 = getelementptr inbounds i32, i32* %a, i32 %inc.202 + store i32 %mul.203, i32* %arrayidx2.203, align 4 + %inc.203 = or i32 %i.08, 204 + %arrayidx.204 = getelementptr inbounds i32, i32* %b, i32 %inc.203 + %408 = load i32, i32* %arrayidx.204, align 4 + %arrayidx1.204 = getelementptr inbounds i32, i32* %c, i32 %inc.203 + %409 = load i32, i32* %arrayidx1.204, align 4 + %mul.204 = mul nsw i32 %409, %408 + %arrayidx2.204 = getelementptr inbounds i32, i32* %a, i32 %inc.203 + store i32 %mul.204, i32* %arrayidx2.204, align 4 + %inc.204 = or i32 %i.08, 205 + %arrayidx.205 = getelementptr inbounds i32, i32* %b, i32 %inc.204 + %410 = load i32, i32* 
%arrayidx.205, align 4 + %arrayidx1.205 = getelementptr inbounds i32, i32* %c, i32 %inc.204 + %411 = load i32, i32* %arrayidx1.205, align 4 + %mul.205 = mul nsw i32 %411, %410 + %arrayidx2.205 = getelementptr inbounds i32, i32* %a, i32 %inc.204 + store i32 %mul.205, i32* %arrayidx2.205, align 4 + %inc.205 = or i32 %i.08, 206 + %arrayidx.206 = getelementptr inbounds i32, i32* %b, i32 %inc.205 + %412 = load i32, i32* %arrayidx.206, align 4 + %arrayidx1.206 = getelementptr inbounds i32, i32* %c, i32 %inc.205 + %413 = load i32, i32* %arrayidx1.206, align 4 + %mul.206 = mul nsw i32 %413, %412 + %arrayidx2.206 = getelementptr inbounds i32, i32* %a, i32 %inc.205 + store i32 %mul.206, i32* %arrayidx2.206, align 4 + %inc.206 = or i32 %i.08, 207 + %arrayidx.207 = getelementptr inbounds i32, i32* %b, i32 %inc.206 + %414 = load i32, i32* %arrayidx.207, align 4 + %arrayidx1.207 = getelementptr inbounds i32, i32* %c, i32 %inc.206 + %415 = load i32, i32* %arrayidx1.207, align 4 + %mul.207 = mul nsw i32 %415, %414 + %arrayidx2.207 = getelementptr inbounds i32, i32* %a, i32 %inc.206 + store i32 %mul.207, i32* %arrayidx2.207, align 4 + %inc.207 = or i32 %i.08, 208 + %arrayidx.208 = getelementptr inbounds i32, i32* %b, i32 %inc.207 + %416 = load i32, i32* %arrayidx.208, align 4 + %arrayidx1.208 = getelementptr inbounds i32, i32* %c, i32 %inc.207 + %417 = load i32, i32* %arrayidx1.208, align 4 + %mul.208 = mul nsw i32 %417, %416 + %arrayidx2.208 = getelementptr inbounds i32, i32* %a, i32 %inc.207 + store i32 %mul.208, i32* %arrayidx2.208, align 4 + %inc.208 = or i32 %i.08, 209 + %arrayidx.209 = getelementptr inbounds i32, i32* %b, i32 %inc.208 + %418 = load i32, i32* %arrayidx.209, align 4 + %arrayidx1.209 = getelementptr inbounds i32, i32* %c, i32 %inc.208 + %419 = load i32, i32* %arrayidx1.209, align 4 + %mul.209 = mul nsw i32 %419, %418 + %arrayidx2.209 = getelementptr inbounds i32, i32* %a, i32 %inc.208 + store i32 %mul.209, i32* %arrayidx2.209, align 4 + %inc.209 = or i32 %i.08, 210 + %arrayidx.210 = getelementptr inbounds i32, i32* %b, i32 %inc.209 + %420 = load i32, i32* %arrayidx.210, align 4 + %arrayidx1.210 = getelementptr inbounds i32, i32* %c, i32 %inc.209 + %421 = load i32, i32* %arrayidx1.210, align 4 + %mul.210 = mul nsw i32 %421, %420 + %arrayidx2.210 = getelementptr inbounds i32, i32* %a, i32 %inc.209 + store i32 %mul.210, i32* %arrayidx2.210, align 4 + %inc.210 = or i32 %i.08, 211 + %arrayidx.211 = getelementptr inbounds i32, i32* %b, i32 %inc.210 + %422 = load i32, i32* %arrayidx.211, align 4 + %arrayidx1.211 = getelementptr inbounds i32, i32* %c, i32 %inc.210 + %423 = load i32, i32* %arrayidx1.211, align 4 + %mul.211 = mul nsw i32 %423, %422 + %arrayidx2.211 = getelementptr inbounds i32, i32* %a, i32 %inc.210 + store i32 %mul.211, i32* %arrayidx2.211, align 4 + %inc.211 = or i32 %i.08, 212 + %arrayidx.212 = getelementptr inbounds i32, i32* %b, i32 %inc.211 + %424 = load i32, i32* %arrayidx.212, align 4 + %arrayidx1.212 = getelementptr inbounds i32, i32* %c, i32 %inc.211 + %425 = load i32, i32* %arrayidx1.212, align 4 + %mul.212 = mul nsw i32 %425, %424 + %arrayidx2.212 = getelementptr inbounds i32, i32* %a, i32 %inc.211 + store i32 %mul.212, i32* %arrayidx2.212, align 4 + %inc.212 = or i32 %i.08, 213 + %arrayidx.213 = getelementptr inbounds i32, i32* %b, i32 %inc.212 + %426 = load i32, i32* %arrayidx.213, align 4 + %arrayidx1.213 = getelementptr inbounds i32, i32* %c, i32 %inc.212 + %427 = load i32, i32* %arrayidx1.213, align 4 + %mul.213 = mul nsw i32 %427, %426 + %arrayidx2.213 = 
getelementptr inbounds i32, i32* %a, i32 %inc.212 + store i32 %mul.213, i32* %arrayidx2.213, align 4 + %inc.213 = or i32 %i.08, 214 + %arrayidx.214 = getelementptr inbounds i32, i32* %b, i32 %inc.213 + %428 = load i32, i32* %arrayidx.214, align 4 + %arrayidx1.214 = getelementptr inbounds i32, i32* %c, i32 %inc.213 + %429 = load i32, i32* %arrayidx1.214, align 4 + %mul.214 = mul nsw i32 %429, %428 + %arrayidx2.214 = getelementptr inbounds i32, i32* %a, i32 %inc.213 + store i32 %mul.214, i32* %arrayidx2.214, align 4 + %inc.214 = or i32 %i.08, 215 + %arrayidx.215 = getelementptr inbounds i32, i32* %b, i32 %inc.214 + %430 = load i32, i32* %arrayidx.215, align 4 + %arrayidx1.215 = getelementptr inbounds i32, i32* %c, i32 %inc.214 + %431 = load i32, i32* %arrayidx1.215, align 4 + %mul.215 = mul nsw i32 %431, %430 + %arrayidx2.215 = getelementptr inbounds i32, i32* %a, i32 %inc.214 + store i32 %mul.215, i32* %arrayidx2.215, align 4 + %inc.215 = or i32 %i.08, 216 + %arrayidx.216 = getelementptr inbounds i32, i32* %b, i32 %inc.215 + %432 = load i32, i32* %arrayidx.216, align 4 + %arrayidx1.216 = getelementptr inbounds i32, i32* %c, i32 %inc.215 + %433 = load i32, i32* %arrayidx1.216, align 4 + %mul.216 = mul nsw i32 %433, %432 + %arrayidx2.216 = getelementptr inbounds i32, i32* %a, i32 %inc.215 + store i32 %mul.216, i32* %arrayidx2.216, align 4 + %inc.216 = or i32 %i.08, 217 + %arrayidx.217 = getelementptr inbounds i32, i32* %b, i32 %inc.216 + %434 = load i32, i32* %arrayidx.217, align 4 + %arrayidx1.217 = getelementptr inbounds i32, i32* %c, i32 %inc.216 + %435 = load i32, i32* %arrayidx1.217, align 4 + %mul.217 = mul nsw i32 %435, %434 + %arrayidx2.217 = getelementptr inbounds i32, i32* %a, i32 %inc.216 + store i32 %mul.217, i32* %arrayidx2.217, align 4 + %inc.217 = or i32 %i.08, 218 + %arrayidx.218 = getelementptr inbounds i32, i32* %b, i32 %inc.217 + %436 = load i32, i32* %arrayidx.218, align 4 + %arrayidx1.218 = getelementptr inbounds i32, i32* %c, i32 %inc.217 + %437 = load i32, i32* %arrayidx1.218, align 4 + %mul.218 = mul nsw i32 %437, %436 + %arrayidx2.218 = getelementptr inbounds i32, i32* %a, i32 %inc.217 + store i32 %mul.218, i32* %arrayidx2.218, align 4 + %inc.218 = or i32 %i.08, 219 + %arrayidx.219 = getelementptr inbounds i32, i32* %b, i32 %inc.218 + %438 = load i32, i32* %arrayidx.219, align 4 + %arrayidx1.219 = getelementptr inbounds i32, i32* %c, i32 %inc.218 + %439 = load i32, i32* %arrayidx1.219, align 4 + %mul.219 = mul nsw i32 %439, %438 + %arrayidx2.219 = getelementptr inbounds i32, i32* %a, i32 %inc.218 + store i32 %mul.219, i32* %arrayidx2.219, align 4 + %inc.219 = or i32 %i.08, 220 + %arrayidx.220 = getelementptr inbounds i32, i32* %b, i32 %inc.219 + %440 = load i32, i32* %arrayidx.220, align 4 + %arrayidx1.220 = getelementptr inbounds i32, i32* %c, i32 %inc.219 + %441 = load i32, i32* %arrayidx1.220, align 4 + %mul.220 = mul nsw i32 %441, %440 + %arrayidx2.220 = getelementptr inbounds i32, i32* %a, i32 %inc.219 + store i32 %mul.220, i32* %arrayidx2.220, align 4 + %inc.220 = or i32 %i.08, 221 + %arrayidx.221 = getelementptr inbounds i32, i32* %b, i32 %inc.220 + %442 = load i32, i32* %arrayidx.221, align 4 + %arrayidx1.221 = getelementptr inbounds i32, i32* %c, i32 %inc.220 + %443 = load i32, i32* %arrayidx1.221, align 4 + %mul.221 = mul nsw i32 %443, %442 + %arrayidx2.221 = getelementptr inbounds i32, i32* %a, i32 %inc.220 + store i32 %mul.221, i32* %arrayidx2.221, align 4 + %inc.221 = or i32 %i.08, 222 + %arrayidx.222 = getelementptr inbounds i32, i32* %b, i32 %inc.221 + 
%444 = load i32, i32* %arrayidx.222, align 4 + %arrayidx1.222 = getelementptr inbounds i32, i32* %c, i32 %inc.221 + %445 = load i32, i32* %arrayidx1.222, align 4 + %mul.222 = mul nsw i32 %445, %444 + %arrayidx2.222 = getelementptr inbounds i32, i32* %a, i32 %inc.221 + store i32 %mul.222, i32* %arrayidx2.222, align 4 + %inc.222 = or i32 %i.08, 223 + %arrayidx.223 = getelementptr inbounds i32, i32* %b, i32 %inc.222 + %446 = load i32, i32* %arrayidx.223, align 4 + %arrayidx1.223 = getelementptr inbounds i32, i32* %c, i32 %inc.222 + %447 = load i32, i32* %arrayidx1.223, align 4 + %mul.223 = mul nsw i32 %447, %446 + %arrayidx2.223 = getelementptr inbounds i32, i32* %a, i32 %inc.222 + store i32 %mul.223, i32* %arrayidx2.223, align 4 + %inc.223 = or i32 %i.08, 224 + %arrayidx.224 = getelementptr inbounds i32, i32* %b, i32 %inc.223 + %448 = load i32, i32* %arrayidx.224, align 4 + %arrayidx1.224 = getelementptr inbounds i32, i32* %c, i32 %inc.223 + %449 = load i32, i32* %arrayidx1.224, align 4 + %mul.224 = mul nsw i32 %449, %448 + %arrayidx2.224 = getelementptr inbounds i32, i32* %a, i32 %inc.223 + store i32 %mul.224, i32* %arrayidx2.224, align 4 + %inc.224 = or i32 %i.08, 225 + %arrayidx.225 = getelementptr inbounds i32, i32* %b, i32 %inc.224 + %450 = load i32, i32* %arrayidx.225, align 4 + %arrayidx1.225 = getelementptr inbounds i32, i32* %c, i32 %inc.224 + %451 = load i32, i32* %arrayidx1.225, align 4 + %mul.225 = mul nsw i32 %451, %450 + %arrayidx2.225 = getelementptr inbounds i32, i32* %a, i32 %inc.224 + store i32 %mul.225, i32* %arrayidx2.225, align 4 + %inc.225 = or i32 %i.08, 226 + %arrayidx.226 = getelementptr inbounds i32, i32* %b, i32 %inc.225 + %452 = load i32, i32* %arrayidx.226, align 4 + %arrayidx1.226 = getelementptr inbounds i32, i32* %c, i32 %inc.225 + %453 = load i32, i32* %arrayidx1.226, align 4 + %mul.226 = mul nsw i32 %453, %452 + %arrayidx2.226 = getelementptr inbounds i32, i32* %a, i32 %inc.225 + store i32 %mul.226, i32* %arrayidx2.226, align 4 + %inc.226 = or i32 %i.08, 227 + %arrayidx.227 = getelementptr inbounds i32, i32* %b, i32 %inc.226 + %454 = load i32, i32* %arrayidx.227, align 4 + %arrayidx1.227 = getelementptr inbounds i32, i32* %c, i32 %inc.226 + %455 = load i32, i32* %arrayidx1.227, align 4 + %mul.227 = mul nsw i32 %455, %454 + %arrayidx2.227 = getelementptr inbounds i32, i32* %a, i32 %inc.226 + store i32 %mul.227, i32* %arrayidx2.227, align 4 + %inc.227 = or i32 %i.08, 228 + %arrayidx.228 = getelementptr inbounds i32, i32* %b, i32 %inc.227 + %456 = load i32, i32* %arrayidx.228, align 4 + %arrayidx1.228 = getelementptr inbounds i32, i32* %c, i32 %inc.227 + %457 = load i32, i32* %arrayidx1.228, align 4 + %mul.228 = mul nsw i32 %457, %456 + %arrayidx2.228 = getelementptr inbounds i32, i32* %a, i32 %inc.227 + store i32 %mul.228, i32* %arrayidx2.228, align 4 + %inc.228 = or i32 %i.08, 229 + %arrayidx.229 = getelementptr inbounds i32, i32* %b, i32 %inc.228 + %458 = load i32, i32* %arrayidx.229, align 4 + %arrayidx1.229 = getelementptr inbounds i32, i32* %c, i32 %inc.228 + %459 = load i32, i32* %arrayidx1.229, align 4 + %mul.229 = mul nsw i32 %459, %458 + %arrayidx2.229 = getelementptr inbounds i32, i32* %a, i32 %inc.228 + store i32 %mul.229, i32* %arrayidx2.229, align 4 + %inc.229 = or i32 %i.08, 230 + %arrayidx.230 = getelementptr inbounds i32, i32* %b, i32 %inc.229 + %460 = load i32, i32* %arrayidx.230, align 4 + %arrayidx1.230 = getelementptr inbounds i32, i32* %c, i32 %inc.229 + %461 = load i32, i32* %arrayidx1.230, align 4 + %mul.230 = mul nsw i32 %461, %460 + 
%arrayidx2.230 = getelementptr inbounds i32, i32* %a, i32 %inc.229 + store i32 %mul.230, i32* %arrayidx2.230, align 4 + %inc.230 = or i32 %i.08, 231 + %arrayidx.231 = getelementptr inbounds i32, i32* %b, i32 %inc.230 + %462 = load i32, i32* %arrayidx.231, align 4 + %arrayidx1.231 = getelementptr inbounds i32, i32* %c, i32 %inc.230 + %463 = load i32, i32* %arrayidx1.231, align 4 + %mul.231 = mul nsw i32 %463, %462 + %arrayidx2.231 = getelementptr inbounds i32, i32* %a, i32 %inc.230 + store i32 %mul.231, i32* %arrayidx2.231, align 4 + %inc.231 = or i32 %i.08, 232 + %arrayidx.232 = getelementptr inbounds i32, i32* %b, i32 %inc.231 + %464 = load i32, i32* %arrayidx.232, align 4 + %arrayidx1.232 = getelementptr inbounds i32, i32* %c, i32 %inc.231 + %465 = load i32, i32* %arrayidx1.232, align 4 + %mul.232 = mul nsw i32 %465, %464 + %arrayidx2.232 = getelementptr inbounds i32, i32* %a, i32 %inc.231 + store i32 %mul.232, i32* %arrayidx2.232, align 4 + %inc.232 = or i32 %i.08, 233 + %arrayidx.233 = getelementptr inbounds i32, i32* %b, i32 %inc.232 + %466 = load i32, i32* %arrayidx.233, align 4 + %arrayidx1.233 = getelementptr inbounds i32, i32* %c, i32 %inc.232 + %467 = load i32, i32* %arrayidx1.233, align 4 + %mul.233 = mul nsw i32 %467, %466 + %arrayidx2.233 = getelementptr inbounds i32, i32* %a, i32 %inc.232 + store i32 %mul.233, i32* %arrayidx2.233, align 4 + %inc.233 = or i32 %i.08, 234 + %arrayidx.234 = getelementptr inbounds i32, i32* %b, i32 %inc.233 + %468 = load i32, i32* %arrayidx.234, align 4 + %arrayidx1.234 = getelementptr inbounds i32, i32* %c, i32 %inc.233 + %469 = load i32, i32* %arrayidx1.234, align 4 + %mul.234 = mul nsw i32 %469, %468 + %arrayidx2.234 = getelementptr inbounds i32, i32* %a, i32 %inc.233 + store i32 %mul.234, i32* %arrayidx2.234, align 4 + %inc.234 = or i32 %i.08, 235 + %arrayidx.235 = getelementptr inbounds i32, i32* %b, i32 %inc.234 + %470 = load i32, i32* %arrayidx.235, align 4 + %arrayidx1.235 = getelementptr inbounds i32, i32* %c, i32 %inc.234 + %471 = load i32, i32* %arrayidx1.235, align 4 + %mul.235 = mul nsw i32 %471, %470 + %arrayidx2.235 = getelementptr inbounds i32, i32* %a, i32 %inc.234 + store i32 %mul.235, i32* %arrayidx2.235, align 4 + %inc.235 = or i32 %i.08, 236 + %arrayidx.236 = getelementptr inbounds i32, i32* %b, i32 %inc.235 + %472 = load i32, i32* %arrayidx.236, align 4 + %arrayidx1.236 = getelementptr inbounds i32, i32* %c, i32 %inc.235 + %473 = load i32, i32* %arrayidx1.236, align 4 + %mul.236 = mul nsw i32 %473, %472 + %arrayidx2.236 = getelementptr inbounds i32, i32* %a, i32 %inc.235 + store i32 %mul.236, i32* %arrayidx2.236, align 4 + %inc.236 = or i32 %i.08, 237 + %arrayidx.237 = getelementptr inbounds i32, i32* %b, i32 %inc.236 + %474 = load i32, i32* %arrayidx.237, align 4 + %arrayidx1.237 = getelementptr inbounds i32, i32* %c, i32 %inc.236 + %475 = load i32, i32* %arrayidx1.237, align 4 + %mul.237 = mul nsw i32 %475, %474 + %arrayidx2.237 = getelementptr inbounds i32, i32* %a, i32 %inc.236 + store i32 %mul.237, i32* %arrayidx2.237, align 4 + %inc.237 = or i32 %i.08, 238 + %arrayidx.238 = getelementptr inbounds i32, i32* %b, i32 %inc.237 + %476 = load i32, i32* %arrayidx.238, align 4 + %arrayidx1.238 = getelementptr inbounds i32, i32* %c, i32 %inc.237 + %477 = load i32, i32* %arrayidx1.238, align 4 + %mul.238 = mul nsw i32 %477, %476 + %arrayidx2.238 = getelementptr inbounds i32, i32* %a, i32 %inc.237 + store i32 %mul.238, i32* %arrayidx2.238, align 4 + %inc.238 = or i32 %i.08, 239 + %arrayidx.239 = getelementptr inbounds i32, i32* 
%b, i32 %inc.238 + %478 = load i32, i32* %arrayidx.239, align 4 + %arrayidx1.239 = getelementptr inbounds i32, i32* %c, i32 %inc.238 + %479 = load i32, i32* %arrayidx1.239, align 4 + %mul.239 = mul nsw i32 %479, %478 + %arrayidx2.239 = getelementptr inbounds i32, i32* %a, i32 %inc.238 + store i32 %mul.239, i32* %arrayidx2.239, align 4 + %inc.239 = or i32 %i.08, 240 + %arrayidx.240 = getelementptr inbounds i32, i32* %b, i32 %inc.239 + %480 = load i32, i32* %arrayidx.240, align 4 + %arrayidx1.240 = getelementptr inbounds i32, i32* %c, i32 %inc.239 + %481 = load i32, i32* %arrayidx1.240, align 4 + %mul.240 = mul nsw i32 %481, %480 + %arrayidx2.240 = getelementptr inbounds i32, i32* %a, i32 %inc.239 + store i32 %mul.240, i32* %arrayidx2.240, align 4 + %inc.240 = or i32 %i.08, 241 + %arrayidx.241 = getelementptr inbounds i32, i32* %b, i32 %inc.240 + %482 = load i32, i32* %arrayidx.241, align 4 + %arrayidx1.241 = getelementptr inbounds i32, i32* %c, i32 %inc.240 + %483 = load i32, i32* %arrayidx1.241, align 4 + %mul.241 = mul nsw i32 %483, %482 + %arrayidx2.241 = getelementptr inbounds i32, i32* %a, i32 %inc.240 + store i32 %mul.241, i32* %arrayidx2.241, align 4 + %inc.241 = or i32 %i.08, 242 + %arrayidx.242 = getelementptr inbounds i32, i32* %b, i32 %inc.241 + %484 = load i32, i32* %arrayidx.242, align 4 + %arrayidx1.242 = getelementptr inbounds i32, i32* %c, i32 %inc.241 + %485 = load i32, i32* %arrayidx1.242, align 4 + %mul.242 = mul nsw i32 %485, %484 + %arrayidx2.242 = getelementptr inbounds i32, i32* %a, i32 %inc.241 + store i32 %mul.242, i32* %arrayidx2.242, align 4 + %inc.242 = or i32 %i.08, 243 + %arrayidx.243 = getelementptr inbounds i32, i32* %b, i32 %inc.242 + %486 = load i32, i32* %arrayidx.243, align 4 + %arrayidx1.243 = getelementptr inbounds i32, i32* %c, i32 %inc.242 + %487 = load i32, i32* %arrayidx1.243, align 4 + %mul.243 = mul nsw i32 %487, %486 + %arrayidx2.243 = getelementptr inbounds i32, i32* %a, i32 %inc.242 + store i32 %mul.243, i32* %arrayidx2.243, align 4 + %inc.243 = or i32 %i.08, 244 + %arrayidx.244 = getelementptr inbounds i32, i32* %b, i32 %inc.243 + %488 = load i32, i32* %arrayidx.244, align 4 + %arrayidx1.244 = getelementptr inbounds i32, i32* %c, i32 %inc.243 + %489 = load i32, i32* %arrayidx1.244, align 4 + %mul.244 = mul nsw i32 %489, %488 + %arrayidx2.244 = getelementptr inbounds i32, i32* %a, i32 %inc.243 + store i32 %mul.244, i32* %arrayidx2.244, align 4 + %inc.244 = or i32 %i.08, 245 + %arrayidx.245 = getelementptr inbounds i32, i32* %b, i32 %inc.244 + %490 = load i32, i32* %arrayidx.245, align 4 + %arrayidx1.245 = getelementptr inbounds i32, i32* %c, i32 %inc.244 + %491 = load i32, i32* %arrayidx1.245, align 4 + %mul.245 = mul nsw i32 %491, %490 + %arrayidx2.245 = getelementptr inbounds i32, i32* %a, i32 %inc.244 + store i32 %mul.245, i32* %arrayidx2.245, align 4 + %inc.245 = or i32 %i.08, 246 + %arrayidx.246 = getelementptr inbounds i32, i32* %b, i32 %inc.245 + %492 = load i32, i32* %arrayidx.246, align 4 + %arrayidx1.246 = getelementptr inbounds i32, i32* %c, i32 %inc.245 + %493 = load i32, i32* %arrayidx1.246, align 4 + %mul.246 = mul nsw i32 %493, %492 + %arrayidx2.246 = getelementptr inbounds i32, i32* %a, i32 %inc.245 + store i32 %mul.246, i32* %arrayidx2.246, align 4 + %inc.246 = or i32 %i.08, 247 + %arrayidx.247 = getelementptr inbounds i32, i32* %b, i32 %inc.246 + %494 = load i32, i32* %arrayidx.247, align 4 + %arrayidx1.247 = getelementptr inbounds i32, i32* %c, i32 %inc.246 + %495 = load i32, i32* %arrayidx1.247, align 4 + %mul.247 = mul nsw 
i32 %495, %494 + %arrayidx2.247 = getelementptr inbounds i32, i32* %a, i32 %inc.246 + store i32 %mul.247, i32* %arrayidx2.247, align 4 + %inc.247 = or i32 %i.08, 248 + %arrayidx.248 = getelementptr inbounds i32, i32* %b, i32 %inc.247 + %496 = load i32, i32* %arrayidx.248, align 4 + %arrayidx1.248 = getelementptr inbounds i32, i32* %c, i32 %inc.247 + %497 = load i32, i32* %arrayidx1.248, align 4 + %mul.248 = mul nsw i32 %497, %496 + %arrayidx2.248 = getelementptr inbounds i32, i32* %a, i32 %inc.247 + store i32 %mul.248, i32* %arrayidx2.248, align 4 + %inc.248 = or i32 %i.08, 249 + %arrayidx.249 = getelementptr inbounds i32, i32* %b, i32 %inc.248 + %498 = load i32, i32* %arrayidx.249, align 4 + %arrayidx1.249 = getelementptr inbounds i32, i32* %c, i32 %inc.248 + %499 = load i32, i32* %arrayidx1.249, align 4 + %mul.249 = mul nsw i32 %499, %498 + %arrayidx2.249 = getelementptr inbounds i32, i32* %a, i32 %inc.248 + store i32 %mul.249, i32* %arrayidx2.249, align 4 + %inc.249 = or i32 %i.08, 250 + %arrayidx.250 = getelementptr inbounds i32, i32* %b, i32 %inc.249 + %500 = load i32, i32* %arrayidx.250, align 4 + %arrayidx1.250 = getelementptr inbounds i32, i32* %c, i32 %inc.249 + %501 = load i32, i32* %arrayidx1.250, align 4 + %mul.250 = mul nsw i32 %501, %500 + %arrayidx2.250 = getelementptr inbounds i32, i32* %a, i32 %inc.249 + store i32 %mul.250, i32* %arrayidx2.250, align 4 + %inc.250 = or i32 %i.08, 251 + %arrayidx.251 = getelementptr inbounds i32, i32* %b, i32 %inc.250 + %502 = load i32, i32* %arrayidx.251, align 4 + %arrayidx1.251 = getelementptr inbounds i32, i32* %c, i32 %inc.250 + %503 = load i32, i32* %arrayidx1.251, align 4 + %mul.251 = mul nsw i32 %503, %502 + %arrayidx2.251 = getelementptr inbounds i32, i32* %a, i32 %inc.250 + store i32 %mul.251, i32* %arrayidx2.251, align 4 + %inc.251 = or i32 %i.08, 252 + %arrayidx.252 = getelementptr inbounds i32, i32* %b, i32 %inc.251 + %504 = load i32, i32* %arrayidx.252, align 4 + %arrayidx1.252 = getelementptr inbounds i32, i32* %c, i32 %inc.251 + %505 = load i32, i32* %arrayidx1.252, align 4 + %mul.252 = mul nsw i32 %505, %504 + %arrayidx2.252 = getelementptr inbounds i32, i32* %a, i32 %inc.251 + store i32 %mul.252, i32* %arrayidx2.252, align 4 + %inc.252 = or i32 %i.08, 253 + %arrayidx.253 = getelementptr inbounds i32, i32* %b, i32 %inc.252 + %506 = load i32, i32* %arrayidx.253, align 4 + %arrayidx1.253 = getelementptr inbounds i32, i32* %c, i32 %inc.252 + %507 = load i32, i32* %arrayidx1.253, align 4 + %mul.253 = mul nsw i32 %507, %506 + %arrayidx2.253 = getelementptr inbounds i32, i32* %a, i32 %inc.252 + store i32 %mul.253, i32* %arrayidx2.253, align 4 + %inc.253 = or i32 %i.08, 254 + %arrayidx.254 = getelementptr inbounds i32, i32* %b, i32 %inc.253 + %508 = load i32, i32* %arrayidx.254, align 4 + %arrayidx1.254 = getelementptr inbounds i32, i32* %c, i32 %inc.253 + %509 = load i32, i32* %arrayidx1.254, align 4 + %mul.254 = mul nsw i32 %509, %508 + %arrayidx2.254 = getelementptr inbounds i32, i32* %a, i32 %inc.253 + store i32 %mul.254, i32* %arrayidx2.254, align 4 + %inc.254 = or i32 %i.08, 255 + %arrayidx.255 = getelementptr inbounds i32, i32* %b, i32 %inc.254 + %510 = load i32, i32* %arrayidx.255, align 4 + %arrayidx1.255 = getelementptr inbounds i32, i32* %c, i32 %inc.254 + %511 = load i32, i32* %arrayidx1.255, align 4 + %mul.255 = mul nsw i32 %511, %510 + %arrayidx2.255 = getelementptr inbounds i32, i32* %a, i32 %inc.254 + store i32 %mul.255, i32* %arrayidx2.255, align 4 + %inc.255 = add nuw nsw i32 %i.08, 256 + %loop.dec = call i32 
@llvm.loop.decrement.reg.i32.i32.i32(i32 %count, i32 1)
+    %exitcond.255 = icmp ne i32 %loop.dec, 0
+    br i1 %exitcond.255, label %for.body, label %for.cond.cleanup
+  }
+
+  declare void @llvm.set.loop.iterations.i32(i32) #0
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
+  declare void @llvm.stackprotector(i8*, i8**) #1
+
+  attributes #0 = { noduplicate nounwind }
+  attributes #1 = { nounwind }
+
+...
+---
+name: massive
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+registers: []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap: false
+  hasPatchPoint: false
+  stackSize: 16
+  offsetAdjustment: 0
+  maxAlignment: 4
+  adjustsStack: false
+  hasCalls: false
+  stackProtector: ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart: false
+  hasMustTailInVarArgFunc: false
+  localFrameSize: 0
+  savePoint: ''
+  restorePoint: ''
+fixedStack: []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+constants: []
+machineFunctionInfo: {}
+body: |
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $r4, $r5, $r7, $lr
+
+    $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r4, killed $r5, killed $r7, killed $lr
+    frame-setup CFI_INSTRUCTION def_cfa_offset 16
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    frame-setup CFI_INSTRUCTION offset $r5, -12
+    frame-setup CFI_INSTRUCTION offset $r4, -16
+    renamable $lr = t2MOVi 4, 14, $noreg, $noreg
+    renamable $r3 = t2MOVi 0, 14, $noreg, $noreg
+    t2DoLoopStart renamable $lr
+
+  bb.1.for.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $lr, $r0, $r1, $r2, $r3
+
+    renamable $r12 = t2LDRs renamable $r1, renamable $r3, 2, 14, $noreg :: (load 4 from %ir.arrayidx)
+    renamable $r4 = t2LDRs renamable $r2, renamable $r3, 2, 14, $noreg :: (load 4 from %ir.arrayidx1)
+    renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg
+    t2STRs killed renamable $r4, renamable $r0, renamable $r3, 2, 14, $noreg :: (store 4 into %ir.arrayidx2)
+    renamable $r4 = t2ORRri renamable $r3, 1, 14, $noreg, $noreg
+    renamable $r12 = t2LDRs renamable $r1, renamable $r4, 2, 14, $noreg :: (load 4 from %ir.arrayidx.1)
+    renamable $r5 = t2LDRs renamable $r2, renamable $r4, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.1)
+    renamable $r5 = nsw t2MUL killed renamable $r5, killed
renamable $r12, 14, $noreg + t2STRs killed renamable $r5, renamable $r0, killed renamable $r4, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.1) + renamable $r5 = t2ORRri renamable $r3, 2, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.2) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.2) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.2) + renamable $r5 = t2ORRri renamable $r3, 3, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.3) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.3) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.3) + renamable $r5 = t2ORRri renamable $r3, 4, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.4) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.4) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.4) + renamable $r5 = t2ORRri renamable $r3, 5, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.5) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.5) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.5) + renamable $r5 = t2ORRri renamable $r3, 6, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.6) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.6) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.6) + renamable $r5 = t2ORRri renamable $r3, 7, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.7) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.7) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.7) + renamable $r5 = t2ORRri renamable $r3, 8, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.8) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.8) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.8) + renamable $r5 = t2ORRri renamable $r3, 9, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 
4 from %ir.arrayidx.9) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.9) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.9) + renamable $r5 = t2ORRri renamable $r3, 10, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.10) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.10) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.10) + renamable $r5 = t2ORRri renamable $r3, 11, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.11) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.11) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.11) + renamable $r5 = t2ORRri renamable $r3, 12, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.12) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.12) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.12) + renamable $r5 = t2ORRri renamable $r3, 13, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.13) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.13) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.13) + renamable $r5 = t2ORRri renamable $r3, 14, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.14) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.14) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.14) + renamable $r5 = t2ORRri renamable $r3, 15, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.15) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.15) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.15) + renamable $r5 = t2ORRri renamable $r3, 16, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.16) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.16) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, 
killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.16) + renamable $r5 = t2ORRri renamable $r3, 17, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.17) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.17) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.17) + renamable $r5 = t2ORRri renamable $r3, 18, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.18) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.18) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.18) + renamable $r5 = t2ORRri renamable $r3, 19, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.19) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.19) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.19) + renamable $r5 = t2ORRri renamable $r3, 20, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.20) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.20) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.20) + renamable $r5 = t2ORRri renamable $r3, 21, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.21) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.21) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.21) + renamable $r5 = t2ORRri renamable $r3, 22, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.22) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.22) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.22) + renamable $r5 = t2ORRri renamable $r3, 23, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.23) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.23) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.23) + renamable $r5 = t2ORRri renamable $r3, 24, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.24) + renamable $r4 = 
t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.24) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.24) + renamable $r5 = t2ORRri renamable $r3, 25, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.25) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.25) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.25) + renamable $r5 = t2ORRri renamable $r3, 26, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.26) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.26) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.26) + renamable $r5 = t2ORRri renamable $r3, 27, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.27) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.27) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.27) + renamable $r5 = t2ORRri renamable $r3, 28, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.28) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.28) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.28) + renamable $r5 = t2ORRri renamable $r3, 29, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.29) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.29) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.29) + renamable $r5 = t2ORRri renamable $r3, 30, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.30) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.30) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.30) + renamable $r5 = t2ORRri renamable $r3, 31, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.31) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.31) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: 
(store 4 into %ir.arrayidx2.31) + renamable $r5 = t2ORRri renamable $r3, 32, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.32) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.32) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.32) + renamable $r5 = t2ORRri renamable $r3, 33, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.33) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.33) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.33) + renamable $r5 = t2ORRri renamable $r3, 34, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.34) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.34) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.34) + renamable $r5 = t2ORRri renamable $r3, 35, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.35) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.35) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.35) + renamable $r5 = t2ORRri renamable $r3, 36, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.36) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.36) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.36) + renamable $r5 = t2ORRri renamable $r3, 37, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.37) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.37) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.37) + renamable $r5 = t2ORRri renamable $r3, 38, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.38) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.38) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.38) + renamable $r5 = t2ORRri renamable $r3, 39, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.39) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 
14, $noreg :: (load 4 from %ir.arrayidx1.39) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.39) + renamable $r5 = t2ORRri renamable $r3, 40, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.40) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.40) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.40) + renamable $r5 = t2ORRri renamable $r3, 41, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.41) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.41) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.41) + renamable $r5 = t2ORRri renamable $r3, 42, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.42) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.42) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.42) + renamable $r5 = t2ORRri renamable $r3, 43, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.43) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.43) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.43) + renamable $r5 = t2ORRri renamable $r3, 44, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.44) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.44) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.44) + renamable $r5 = t2ORRri renamable $r3, 45, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.45) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.45) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.45) + renamable $r5 = t2ORRri renamable $r3, 46, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.46) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.46) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.46) + renamable 
$r5 = t2ORRri renamable $r3, 47, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.47) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.47) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.47) + renamable $r5 = t2ORRri renamable $r3, 48, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.48) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.48) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.48) + renamable $r5 = t2ORRri renamable $r3, 49, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.49) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.49) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.49) + renamable $r5 = t2ORRri renamable $r3, 50, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.50) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.50) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.50) + renamable $r5 = t2ORRri renamable $r3, 51, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.51) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.51) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.51) + renamable $r5 = t2ORRri renamable $r3, 52, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.52) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.52) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.52) + renamable $r5 = t2ORRri renamable $r3, 53, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.53) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.53) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.53) + renamable $r5 = t2ORRri renamable $r3, 54, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.54) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.54) 
+ renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.54) + renamable $r5 = t2ORRri renamable $r3, 55, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.55) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.55) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.55) + renamable $r5 = t2ORRri renamable $r3, 56, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.56) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.56) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.56) + renamable $r5 = t2ORRri renamable $r3, 57, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.57) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.57) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.57) + renamable $r5 = t2ORRri renamable $r3, 58, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.58) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.58) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.58) + renamable $r5 = t2ORRri renamable $r3, 59, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.59) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.59) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.59) + renamable $r5 = t2ORRri renamable $r3, 60, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.60) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.60) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.60) + renamable $r5 = t2ORRri renamable $r3, 61, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.61) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.61) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.61) + renamable $r5 = t2ORRri renamable $r3, 62, 14, $noreg, 
$noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.62) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.62) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.62) + renamable $r5 = t2ORRri renamable $r3, 63, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.63) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.63) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.63) + renamable $r5 = t2ORRri renamable $r3, 64, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.64) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.64) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.64) + renamable $r5 = t2ORRri renamable $r3, 65, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.65) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.65) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.65) + renamable $r5 = t2ORRri renamable $r3, 66, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.66) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.66) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.66) + renamable $r5 = t2ORRri renamable $r3, 67, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.67) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.67) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.67) + renamable $r5 = t2ORRri renamable $r3, 68, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.68) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.68) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.68) + renamable $r5 = t2ORRri renamable $r3, 69, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.69) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.69) + renamable $r4 = nsw t2MUL killed renamable 
$r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.69) + renamable $r5 = t2ORRri renamable $r3, 70, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.70) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.70) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.70) + renamable $r5 = t2ORRri renamable $r3, 71, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.71) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.71) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.71) + renamable $r5 = t2ORRri renamable $r3, 72, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.72) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.72) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.72) + renamable $r5 = t2ORRri renamable $r3, 73, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.73) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.73) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.73) + renamable $r5 = t2ORRri renamable $r3, 74, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.74) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.74) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.74) + renamable $r5 = t2ORRri renamable $r3, 75, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.75) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.75) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.75) + renamable $r5 = t2ORRri renamable $r3, 76, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.76) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.76) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.76) + renamable $r5 = t2ORRri renamable $r3, 77, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable 
$r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.77) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.77) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.77) + renamable $r5 = t2ORRri renamable $r3, 78, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.78) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.78) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.78) + renamable $r5 = t2ORRri renamable $r3, 79, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.79) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.79) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.79) + renamable $r5 = t2ORRri renamable $r3, 80, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.80) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.80) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.80) + renamable $r5 = t2ORRri renamable $r3, 81, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.81) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.81) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.81) + renamable $r5 = t2ORRri renamable $r3, 82, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.82) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.82) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.82) + renamable $r5 = t2ORRri renamable $r3, 83, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.83) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.83) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.83) + renamable $r5 = t2ORRri renamable $r3, 84, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.84) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.84) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + 
t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.84) + renamable $r5 = t2ORRri renamable $r3, 85, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.85) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.85) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.85) + renamable $r5 = t2ORRri renamable $r3, 86, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.86) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.86) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.86) + renamable $r5 = t2ORRri renamable $r3, 87, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.87) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.87) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.87) + renamable $r5 = t2ORRri renamable $r3, 88, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.88) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.88) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.88) + renamable $r5 = t2ORRri renamable $r3, 89, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.89) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.89) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.89) + renamable $r5 = t2ORRri renamable $r3, 90, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.90) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.90) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.90) + renamable $r5 = t2ORRri renamable $r3, 91, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.91) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.91) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.91) + renamable $r5 = t2ORRri renamable $r3, 92, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 
4 from %ir.arrayidx.92) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.92) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.92) + renamable $r5 = t2ORRri renamable $r3, 93, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.93) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.93) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.93) + renamable $r5 = t2ORRri renamable $r3, 94, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.94) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.94) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.94) + renamable $r5 = t2ORRri renamable $r3, 95, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.95) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.95) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.95) + renamable $r5 = t2ORRri renamable $r3, 96, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.96) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.96) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.96) + renamable $r5 = t2ORRri renamable $r3, 97, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.97) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.97) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.97) + renamable $r5 = t2ORRri renamable $r3, 98, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.98) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.98) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.98) + renamable $r5 = t2ORRri renamable $r3, 99, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.99) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.99) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, 
killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.99) + renamable $r5 = t2ORRri renamable $r3, 100, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.100) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.100) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.100) + renamable $r5 = t2ORRri renamable $r3, 101, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.101) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.101) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.101) + renamable $r5 = t2ORRri renamable $r3, 102, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.102) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.102) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.102) + renamable $r5 = t2ORRri renamable $r3, 103, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.103) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.103) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.103) + renamable $r5 = t2ORRri renamable $r3, 104, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.104) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.104) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.104) + renamable $r5 = t2ORRri renamable $r3, 105, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.105) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.105) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.105) + renamable $r5 = t2ORRri renamable $r3, 106, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.106) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.106) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.106) + renamable $r5 = t2ORRri renamable $r3, 107, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from 
%ir.arrayidx.107) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.107) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.107) + renamable $r5 = t2ORRri renamable $r3, 108, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.108) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.108) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.108) + renamable $r5 = t2ORRri renamable $r3, 109, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.109) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.109) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.109) + renamable $r5 = t2ORRri renamable $r3, 110, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.110) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.110) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.110) + renamable $r5 = t2ORRri renamable $r3, 111, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.111) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.111) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.111) + renamable $r5 = t2ORRri renamable $r3, 112, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.112) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.112) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.112) + renamable $r5 = t2ORRri renamable $r3, 113, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.113) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.113) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.113) + renamable $r5 = t2ORRri renamable $r3, 114, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.114) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.114) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed 
renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.114) + renamable $r5 = t2ORRri renamable $r3, 115, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.115) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.115) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.115) + renamable $r5 = t2ORRri renamable $r3, 116, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.116) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.116) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.116) + renamable $r5 = t2ORRri renamable $r3, 117, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.117) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.117) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.117) + renamable $r5 = t2ORRri renamable $r3, 118, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.118) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.118) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.118) + renamable $r5 = t2ORRri renamable $r3, 119, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.119) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.119) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.119) + renamable $r5 = t2ORRri renamable $r3, 120, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.120) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.120) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.120) + renamable $r5 = t2ORRri renamable $r3, 121, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.121) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.121) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.121) + renamable $r5 = t2ORRri renamable $r3, 122, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, 
$noreg :: (load 4 from %ir.arrayidx.122) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.122) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.122) + renamable $r5 = t2ORRri renamable $r3, 123, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.123) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.123) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.123) + renamable $r5 = t2ORRri renamable $r3, 124, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.124) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.124) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.124) + renamable $r5 = t2ORRri renamable $r3, 125, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.125) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.125) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.125) + renamable $r5 = t2ORRri renamable $r3, 126, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.126) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.126) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.126) + renamable $r5 = t2ORRri renamable $r3, 127, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.127) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.127) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.127) + renamable $r5 = t2ORRri renamable $r3, 128, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.128) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.128) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.128) + renamable $r5 = t2ORRri renamable $r3, 129, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.129) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.129) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg 
+ t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.129) + renamable $r5 = t2ORRri renamable $r3, 130, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.130) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.130) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.130) + renamable $r5 = t2ORRri renamable $r3, 131, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.131) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.131) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.131) + renamable $r5 = t2ORRri renamable $r3, 132, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.132) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.132) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.132) + renamable $r5 = t2ORRri renamable $r3, 133, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.133) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.133) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.133) + renamable $r5 = t2ORRri renamable $r3, 134, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.134) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.134) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.134) + renamable $r5 = t2ORRri renamable $r3, 135, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.135) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.135) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.135) + renamable $r5 = t2ORRri renamable $r3, 136, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.136) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.136) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.136) + renamable $r5 = t2ORRri renamable $r3, 137, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, 
renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.137) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.137) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.137) + renamable $r5 = t2ORRri renamable $r3, 138, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.138) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.138) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.138) + renamable $r5 = t2ORRri renamable $r3, 139, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.139) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.139) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.139) + renamable $r5 = t2ORRri renamable $r3, 140, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.140) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.140) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.140) + renamable $r5 = t2ORRri renamable $r3, 141, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.141) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.141) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.141) + renamable $r5 = t2ORRri renamable $r3, 142, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.142) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.142) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.142) + renamable $r5 = t2ORRri renamable $r3, 143, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.143) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.143) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.143) + renamable $r5 = t2ORRri renamable $r3, 144, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.144) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.144) + renamable $r4 = nsw t2MUL killed renamable $r4, killed 
renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.144) + renamable $r5 = t2ORRri renamable $r3, 145, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.145) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.145) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.145) + renamable $r5 = t2ORRri renamable $r3, 146, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.146) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.146) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.146) + renamable $r5 = t2ORRri renamable $r3, 147, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.147) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.147) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.147) + renamable $r5 = t2ORRri renamable $r3, 148, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.148) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.148) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.148) + renamable $r5 = t2ORRri renamable $r3, 149, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.149) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.149) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.149) + renamable $r5 = t2ORRri renamable $r3, 150, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.150) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.150) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.150) + renamable $r5 = t2ORRri renamable $r3, 151, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.151) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.151) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.151) + renamable $r5 = t2ORRri renamable $r3, 152, 14, $noreg, $noreg + renamable $r12 = 
t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.152) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.152) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.152) + renamable $r5 = t2ORRri renamable $r3, 153, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.153) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.153) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.153) + renamable $r5 = t2ORRri renamable $r3, 154, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.154) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.154) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.154) + renamable $r5 = t2ORRri renamable $r3, 155, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.155) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.155) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.155) + renamable $r5 = t2ORRri renamable $r3, 156, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.156) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.156) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.156) + renamable $r5 = t2ORRri renamable $r3, 157, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.157) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.157) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.157) + renamable $r5 = t2ORRri renamable $r3, 158, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.158) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.158) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.158) + renamable $r5 = t2ORRri renamable $r3, 159, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.159) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.159) + renamable $r4 = nsw t2MUL killed 
renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.159) + renamable $r5 = t2ORRri renamable $r3, 160, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.160) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.160) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.160) + renamable $r5 = t2ORRri renamable $r3, 161, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.161) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.161) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.161) + renamable $r5 = t2ORRri renamable $r3, 162, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.162) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.162) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.162) + renamable $r5 = t2ORRri renamable $r3, 163, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.163) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.163) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.163) + renamable $r5 = t2ORRri renamable $r3, 164, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.164) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.164) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.164) + renamable $r5 = t2ORRri renamable $r3, 165, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.165) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.165) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.165) + renamable $r5 = t2ORRri renamable $r3, 166, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.166) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.166) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.166) + renamable $r5 = t2ORRri renamable $r3, 167, 14, $noreg, 
$noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.167) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.167) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.167) + renamable $r5 = t2ORRri renamable $r3, 168, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.168) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.168) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.168) + renamable $r5 = t2ORRri renamable $r3, 169, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.169) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.169) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.169) + renamable $r5 = t2ORRri renamable $r3, 170, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.170) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.170) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.170) + renamable $r5 = t2ORRri renamable $r3, 171, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.171) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.171) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.171) + renamable $r5 = t2ORRri renamable $r3, 172, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.172) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.172) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.172) + renamable $r5 = t2ORRri renamable $r3, 173, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.173) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.173) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.173) + renamable $r5 = t2ORRri renamable $r3, 174, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.174) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.174) + renamable 
$r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.174) + renamable $r5 = t2ORRri renamable $r3, 175, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.175) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.175) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.175) + renamable $r5 = t2ORRri renamable $r3, 176, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.176) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.176) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.176) + renamable $r5 = t2ORRri renamable $r3, 177, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.177) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.177) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.177) + renamable $r5 = t2ORRri renamable $r3, 178, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.178) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.178) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.178) + renamable $r5 = t2ORRri renamable $r3, 179, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.179) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.179) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.179) + renamable $r5 = t2ORRri renamable $r3, 180, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.180) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.180) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.180) + renamable $r5 = t2ORRri renamable $r3, 181, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.181) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.181) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.181) + renamable $r5 = t2ORRri renamable 
$r3, 182, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.182) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.182) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.182) + renamable $r5 = t2ORRri renamable $r3, 183, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.183) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.183) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.183) + renamable $r5 = t2ORRri renamable $r3, 184, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.184) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.184) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.184) + renamable $r5 = t2ORRri renamable $r3, 185, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.185) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.185) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.185) + renamable $r5 = t2ORRri renamable $r3, 186, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.186) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.186) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.186) + renamable $r5 = t2ORRri renamable $r3, 187, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.187) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.187) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.187) + renamable $r5 = t2ORRri renamable $r3, 188, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.188) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.188) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.188) + renamable $r5 = t2ORRri renamable $r3, 189, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.189) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from 
%ir.arrayidx1.189) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.189) + renamable $r5 = t2ORRri renamable $r3, 190, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.190) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.190) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.190) + renamable $r5 = t2ORRri renamable $r3, 191, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.191) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.191) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.191) + renamable $r5 = t2ORRri renamable $r3, 192, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.192) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.192) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.192) + renamable $r5 = t2ORRri renamable $r3, 193, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.193) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.193) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.193) + renamable $r5 = t2ORRri renamable $r3, 194, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.194) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.194) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.194) + renamable $r5 = t2ORRri renamable $r3, 195, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.195) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.195) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.195) + renamable $r5 = t2ORRri renamable $r3, 196, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.196) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.196) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.196) + 
renamable $r5 = t2ORRri renamable $r3, 197, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.197) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.197) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.197) + renamable $r5 = t2ORRri renamable $r3, 198, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.198) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.198) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.198) + renamable $r5 = t2ORRri renamable $r3, 199, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.199) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.199) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.199) + renamable $r5 = t2ORRri renamable $r3, 200, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.200) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.200) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.200) + renamable $r5 = t2ORRri renamable $r3, 201, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.201) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.201) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.201) + renamable $r5 = t2ORRri renamable $r3, 202, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.202) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.202) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.202) + renamable $r5 = t2ORRri renamable $r3, 203, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.203) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.203) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.203) + renamable $r5 = t2ORRri renamable $r3, 204, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.204) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, 
$noreg :: (load 4 from %ir.arrayidx1.204) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.204) + renamable $r5 = t2ORRri renamable $r3, 205, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.205) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.205) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.205) + renamable $r5 = t2ORRri renamable $r3, 206, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.206) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.206) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.206) + renamable $r5 = t2ORRri renamable $r3, 207, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.207) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.207) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.207) + renamable $r5 = t2ORRri renamable $r3, 208, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.208) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.208) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.208) + renamable $r5 = t2ORRri renamable $r3, 209, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.209) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.209) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.209) + renamable $r5 = t2ORRri renamable $r3, 210, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.210) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.210) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.210) + renamable $r5 = t2ORRri renamable $r3, 211, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.211) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.211) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into 
%ir.arrayidx2.211) + renamable $r5 = t2ORRri renamable $r3, 212, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.212) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.212) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.212) + renamable $r5 = t2ORRri renamable $r3, 213, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.213) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.213) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.213) + renamable $r5 = t2ORRri renamable $r3, 214, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.214) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.214) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.214) + renamable $r5 = t2ORRri renamable $r3, 215, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.215) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.215) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.215) + renamable $r5 = t2ORRri renamable $r3, 216, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.216) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.216) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.216) + renamable $r5 = t2ORRri renamable $r3, 217, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.217) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.217) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.217) + renamable $r5 = t2ORRri renamable $r3, 218, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.218) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.218) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.218) + renamable $r5 = t2ORRri renamable $r3, 219, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.219) + renamable $r4 = t2LDRs renamable $r2, 
renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.219) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.219) + renamable $r5 = t2ORRri renamable $r3, 220, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.220) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.220) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.220) + renamable $r5 = t2ORRri renamable $r3, 221, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.221) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.221) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.221) + renamable $r5 = t2ORRri renamable $r3, 222, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.222) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.222) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.222) + renamable $r5 = t2ORRri renamable $r3, 223, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.223) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.223) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.223) + renamable $r5 = t2ORRri renamable $r3, 224, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.224) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.224) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.224) + renamable $r5 = t2ORRri renamable $r3, 225, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.225) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.225) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.225) + renamable $r5 = t2ORRri renamable $r3, 226, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.226) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.226) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg 
:: (store 4 into %ir.arrayidx2.226) + renamable $r5 = t2ORRri renamable $r3, 227, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.227) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.227) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.227) + renamable $r5 = t2ORRri renamable $r3, 228, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.228) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.228) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.228) + renamable $r5 = t2ORRri renamable $r3, 229, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.229) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.229) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.229) + renamable $r5 = t2ORRri renamable $r3, 230, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.230) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.230) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.230) + renamable $r5 = t2ORRri renamable $r3, 231, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.231) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.231) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.231) + renamable $r5 = t2ORRri renamable $r3, 232, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.232) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.232) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.232) + renamable $r5 = t2ORRri renamable $r3, 233, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.233) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.233) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.233) + renamable $r5 = t2ORRri renamable $r3, 234, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.234) + renamable $r4 = t2LDRs 
renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.234) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.234) + renamable $r5 = t2ORRri renamable $r3, 235, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.235) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.235) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.235) + renamable $r5 = t2ORRri renamable $r3, 236, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.236) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.236) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.236) + renamable $r5 = t2ORRri renamable $r3, 237, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.237) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.237) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.237) + renamable $r5 = t2ORRri renamable $r3, 238, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.238) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.238) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.238) + renamable $r5 = t2ORRri renamable $r3, 239, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.239) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.239) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.239) + renamable $r5 = t2ORRri renamable $r3, 240, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.240) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.240) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.240) + renamable $r5 = t2ORRri renamable $r3, 241, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.241) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.241) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable 
$r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.241) + renamable $r5 = t2ORRri renamable $r3, 242, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.242) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.242) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.242) + renamable $r5 = t2ORRri renamable $r3, 243, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.243) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.243) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.243) + renamable $r5 = t2ORRri renamable $r3, 244, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.244) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.244) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.244) + renamable $r5 = t2ORRri renamable $r3, 245, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.245) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.245) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.245) + renamable $r5 = t2ORRri renamable $r3, 246, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.246) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.246) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.246) + renamable $r5 = t2ORRri renamable $r3, 247, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.247) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.247) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.247) + renamable $r5 = t2ORRri renamable $r3, 248, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.248) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.248) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.248) + renamable $r5 = t2ORRri renamable $r3, 249, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.249) + 
renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.249) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.249) + renamable $r5 = t2ORRri renamable $r3, 250, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.250) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.250) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.250) + renamable $r5 = t2ORRri renamable $r3, 251, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.251) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.251) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.251) + renamable $r5 = t2ORRri renamable $r3, 252, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.252) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.252) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.252) + renamable $r5 = t2ORRri renamable $r3, 253, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.253) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.253) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.253) + renamable $r5 = t2ORRri renamable $r3, 254, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.254) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.254) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.254) + renamable $r5 = t2ORRri renamable $r3, 255, 14, $noreg, $noreg + renamable $r3 = nuw nsw t2ADDri killed renamable $r3, 256, 14, $noreg, $noreg + renamable $r12 = t2LDRs renamable $r1, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx.255) + renamable $r4 = t2LDRs renamable $r2, renamable $r5, 2, 14, $noreg :: (load 4 from %ir.arrayidx1.255) + renamable $r4 = nsw t2MUL killed renamable $r4, killed renamable $r12, 14, $noreg + t2STRs killed renamable $r4, renamable $r0, killed renamable $r5, 2, 14, $noreg :: (store 4 into %ir.arrayidx2.255) + renamable $lr = t2LoopDec killed renamable $lr, 1 + t2LoopEnd renamable $lr, %bb.1 + t2B %bb.2, 14, $noreg + + bb.2.for.cond.cleanup: + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r5, def $r7, def $pc + +... 
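The two tests that follow, revert-after-call.mir and revert-after-spill.mir, cover the fall-back path: their loops either clobber LR with a call or store and reload LR inside the loop, so the pseudo instructions cannot become DLS/LE and have to be rewritten back into an ordinary counted loop. The sketch below is only an illustration of what that reversion amounts to, not the patch's literal code; the free-standing helpers and their signatures are invented here, and the opcode and operand choices (t2SUBri, t2CMPri, t2Bcc) are assumptions inferred from the 'sub.w lr, lr, #1', 'cmp.w lr, #0' and 'bne' sequences the CHECK lines expect. It assumes llvm/CodeGen/MachineInstrBuilder.h in addition to the headers the pass already includes.

  // "$lr = t2LoopDec $lr, <imm>"  ->  "sub.w lr, lr, #<imm>"
  static void RevertLoopDec(MachineInstr *MI, const ARMBaseInstrInfo *TII) {
    MachineBasicBlock *MBB = MI->getParent();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(ARM::t2SUBri));
    MIB.addDef(ARM::LR);
    MIB.add(MI->getOperand(1)); // the LR input
    MIB.add(MI->getOperand(2)); // the decrement amount
    MIB.addImm(ARMCC::AL);      // unpredicated
    MIB.addReg(0);              // no predicate register
    MIB.addReg(0);              // 's' bit clear; the flags come from the compare
    MI->eraseFromParent();
  }

  // "t2LoopEnd $lr, %header"  ->  "cmp.w lr, #0" followed by "bne %header"
  static void RevertLoopEnd(MachineInstr *MI, const ARMBaseInstrInfo *TII) {
    MachineBasicBlock *MBB = MI->getParent();
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(ARM::t2CMPri))
        .add(MI->getOperand(0)) // the counter held in LR
        .addImm(0)
        .addImm(ARMCC::AL)
        .addReg(0);
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(ARM::t2Bcc))
        .add(MI->getOperand(1)) // branch back to the loop header
        .addImm(ARMCC::NE)
        .addReg(ARM::CPSR);
    MI->eraseFromParent();
  }

With that shape in mind, the two tests differ only in what forces the reversion: skip_call clobbers LR with a call to bar, while skip_spill stores LR to a stack slot and reloads it within the loop body.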
Index: test/Transforms/HardwareLoops/ARM/revert-after-call.mir =================================================================== --- /dev/null +++ test/Transforms/HardwareLoops/ARM/revert-after-call.mir @@ -0,0 +1,141 @@ +# RUN: llc -mtriple=thumbv8.1m.main %s -o - | FileCheck %s + +# CHECK: .LBB0_2: +# CHECK: sub.w lr, lr, #1 +# CHECK: mov [[TMP:r[0-9]+]], lr +# CHECK: bl bar +# CHECK: mov lr, [[TMP]] +# CHECK: cmp.w lr, #0 +# CHECK: bne .LBB0_2 + +--- | + target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + target triple = "thumbv8.1m.main-arm-none-eabi" + + define i32 @skip_call(i32 %n) #0 { + entry: + %cmp6 = icmp eq i32 %n, 0 + br i1 %cmp6, label %while.end, label %while.body.preheader + + while.body.preheader: ; preds = %entry + call void @llvm.set.loop.iterations.i32(i32 %n) + br label %while.body + + while.body: ; preds = %while.body, %while.body.preheader + %res.07 = phi i32 [ %add, %while.body ], [ 0, %while.body.preheader ] + %0 = phi i32 [ %n, %while.body.preheader ], [ %1, %while.body ] + %call = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() + %add = add nsw i32 %call, %res.07 + %1 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %2 = icmp ne i32 %1, 0 + br i1 %2, label %while.body, label %while.end + + while.end: ; preds = %while.body, %entry + %res.0.lcssa = phi i32 [ 0, %entry ], [ %add, %while.body ] + ret i32 %res.0.lcssa + } + + declare i32 @bar(...) local_unnamed_addr #0 + declare void @llvm.set.loop.iterations.i32(i32) #1 + declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1 + declare void @llvm.stackprotector(i8*, i8**) #2 + + attributes #0 = { "target-features"="+mve.fp" } + attributes #1 = { noduplicate nounwind } + attributes #2 = { nounwind } + +... +--- +name: skip_call +alignment: 1 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 16 + offsetAdjustment: 0 + maxAlignment: 4 + adjustsStack: true + hasCalls: true + stackProtector: '' + maxCallFrameSize: 0 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: [] +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1(0x30000000), %bb.3(0x50000000) + liveins: $r0, $r4, $r5, $r7, $lr 
+ + $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r4, killed $r5, killed $r7, killed $lr + frame-setup CFI_INSTRUCTION def_cfa_offset 16 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + frame-setup CFI_INSTRUCTION offset $r5, -12 + frame-setup CFI_INSTRUCTION offset $r4, -16 + t2CMPri $r0, 0, 14, $noreg, implicit-def $cpsr + t2Bcc %bb.1, 0, killed $cpsr + + bb.3.while.body.preheader: + successors: %bb.4(0x80000000) + liveins: $r0 + + $lr = tMOVr $r0, 14, $noreg + renamable $r4 = t2MOVi 0, 14, $noreg, $noreg + t2DoLoopStart killed $r0 + + bb.4.while.body: + successors: %bb.4(0x7c000000), %bb.2(0x04000000) + liveins: $lr, $r4 + + renamable $lr = t2LoopDec killed renamable $lr, 1 + $r5 = tMOVr killed $lr, 14, $noreg + tBL 14, $noreg, @bar, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $r0 + $lr = tMOVr killed $r5, 14, $noreg + renamable $r4 = nsw t2ADDrr killed renamable $r0, killed renamable $r4, 14, $noreg, $noreg + t2LoopEnd renamable $lr, %bb.4 + t2B %bb.2, 14, $noreg + + bb.2.while.end: + liveins: $r4 + + $r0 = tMOVr killed $r4, 14, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0 + + bb.1: + renamable $r4 = t2MOVi 0, 14, $noreg, $noreg + $r0 = tMOVr killed $r4, 14, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0 + +... Index: test/Transforms/HardwareLoops/ARM/revert-after-spill.mir =================================================================== --- /dev/null +++ test/Transforms/HardwareLoops/ARM/revert-after-spill.mir @@ -0,0 +1,139 @@ +# RUN: llc -mtriple=thumbv8.1m.main %s -o - | FileCheck %s + +# CHECK: .LBB0_2: +# CHECK: sub.w lr, lr, #1 +# CHECK: str.w lr, [sp, #12] +# CHECK: ldr.w lr, [sp, #12] +# CHECK: cmp.w lr, #0 +# CHECK: bne .LBB0_2 + +--- | + target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + target triple = "thumbv8.1m.main-arm-none-eabi" + + define i32 @skip_spill(i32 %n) #0 { + entry: + %cmp6 = icmp eq i32 %n, 0 + br i1 %cmp6, label %while.end, label %while.body.preheader + + while.body.preheader: ; preds = %entry + call void @llvm.set.loop.iterations.i32(i32 %n) + br label %while.body + + while.body: ; preds = %while.body, %while.body.preheader + %res.07 = phi i32 [ %add, %while.body ], [ 0, %while.body.preheader ] + %0 = phi i32 [ %n, %while.body.preheader ], [ %1, %while.body ] + %call = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() + %add = add nsw i32 %call, %res.07 + %1 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %2 = icmp ne i32 %1, 0 + br i1 %2, label %while.body, label %while.end + + while.end: ; preds = %while.body, %entry + %res.0.lcssa = phi i32 [ 0, %entry ], [ %add, %while.body ] + ret i32 %res.0.lcssa + } + + declare i32 @bar(...) local_unnamed_addr #0 + declare void @llvm.set.loop.iterations.i32(i32) #1 + declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1 + declare void @llvm.stackprotector(i8*, i8**) #2 + + attributes #0 = { "target-features"="+mve.fp" } + attributes #1 = { noduplicate nounwind } + attributes #2 = { nounwind } + +... 
+--- +name: skip_spill +alignment: 1 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 16 + offsetAdjustment: 0 + maxAlignment: 4 + adjustsStack: true + hasCalls: true + stackProtector: '' + maxCallFrameSize: 0 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: [] +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1(0x30000000), %bb.3(0x50000000) + liveins: $r0, $r4, $r5, $r7, $lr + + $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r4, killed $r5, killed $r7, killed $lr + frame-setup CFI_INSTRUCTION def_cfa_offset 16 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + frame-setup CFI_INSTRUCTION offset $r5, -12 + frame-setup CFI_INSTRUCTION offset $r4, -16 + t2CMPri $r0, 0, 14, $noreg, implicit-def $cpsr + t2Bcc %bb.1, 0, killed $cpsr + + bb.3.while.body.preheader: + successors: %bb.4(0x80000000) + liveins: $r0 + + $lr = tMOVr $r0, 14, $noreg + renamable $r4 = t2MOVi 0, 14, $noreg, $noreg + t2DoLoopStart killed $r0 + + bb.4.while.body: + successors: %bb.4(0x7c000000), %bb.2(0x04000000) + liveins: $lr, $r4 + + renamable $lr = t2LoopDec killed renamable $lr, 1 + t2STRi12 $lr, %stack.0, 0, 14, $noreg :: (store 4) + $lr = t2LDRi12 %stack.0, 0, 14, $noreg :: (load 4) + renamable $r4 = nsw t2ADDrr renamable $lr, killed renamable $r4, 14, $noreg, $noreg + t2LoopEnd renamable $lr, %bb.4 + t2B %bb.2, 14, $noreg + + bb.2.while.end: + liveins: $r4 + + $r0 = tMOVr killed $r4, 14, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0 + + bb.1: + renamable $r4 = t2MOVi 0, 14, $noreg, $noreg + $r0 = tMOVr killed $r4, 14, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r5, def $r7, def $pc, implicit killed $r0 + +... 
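The remaining tests, simple-do.ll, structure.ll and switch.mir, exercise the path where the loop is kept in low-overhead form: the CHECK lines look for dls/le at the assembly level and for t2DLS/t2LEUpdate at the MIR level. Conceptually, the LoopEnd pseudo becomes a single LE that decrements the count in LR and branches back to the header while it remains non-zero, which makes the separate LoopDec redundant. The following is a minimal sketch of that combination under the same caveats as above: an illustrative helper, not necessarily the patch's exact code, with the operand layout inferred from the t2LEUpdate line checked in switch.mir.

  static void CombineIntoLE(MachineInstr *Dec, MachineInstr *End,
                            const ARMBaseInstrInfo *TII) {
    MachineBasicBlock *MBB = End->getParent();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, End, End->getDebugLoc(), TII->get(ARM::t2LEUpdate));
    MIB.addDef(ARM::LR);         // LE writes the decremented count back to LR
    MIB.add(End->getOperand(0)); // the counter
    MIB.add(End->getOperand(1)); // the loop header to branch back to
    End->eraseFromParent();
    Dec->eraseFromParent();      // the decrement is now implicit in LE
  }

The dls/le checks and the 'mov lr' elision checks below are what distinguish a successfully finalized loop from the reverted cases above.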
Index: test/Transforms/HardwareLoops/ARM/simple-do.ll =================================================================== --- test/Transforms/HardwareLoops/ARM/simple-do.ll +++ test/Transforms/HardwareLoops/ARM/simple-do.ll @@ -1,6 +1,7 @@ ; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -hardware-loops -disable-arm-loloops=false %s -S -o - | FileCheck %s ; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -hardware-loops -disable-arm-loloops=true %s -S -o - | FileCheck %s --check-prefix=DISABLED ; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=-lob -hardware-loops %s -S -o - | FileCheck %s --check-prefix=DISABLED +; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -disable-arm-loloops=false %s -o - | FileCheck %s --check-prefix=CHECK-LLC ; DISABLED-NOT: llvm.set.loop.iterations ; DISABLED-NOT: llvm.loop.decrement @@ -15,6 +16,15 @@ ; CHECK: [[LOOP_DEC]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[REM]], i32 1) ; CHECK: [[CMP:%[^ ]+]] = icmp ne i32 [[LOOP_DEC]], 0 ; CHECK: br i1 [[CMP]], label %while.body, label %while.end + +; CHECK-LLC-LABEL:do_copy: +; CHECK-LLC-NOT: mov lr, r0 +; CHECK-LLC: dls lr, r0 +; CHECK-LLC-NOT: mov lr, r0 +; CHECK-LLC: [[LOOP_HEADER:\.LBB[0-9_]+]]: +; CHECK-LLC: le lr, [[LOOP_HEADER]] +; CHECK-LLC-NOT: b [[LOOP_EXIT:\.LBB[0-9._]+]] +; CHECK-LLC: @ %while.end define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) { entry: br label %while.body @@ -45,6 +55,14 @@ ; CHECK: [[CMP:%[^ ]+]] = icmp ne i32 [[LOOP_DEC]], 0 ; CHECK: br i1 [[CMP]], label %while.body, label %while.end.loopexit +; CHECK-LLC-LABEL:do_inc1: +; CHECK-LLC: dls lr, +; CHECK-LLC-NOT: mov lr, +; CHECK-LLC: [[LOOP_HEADER:\.LBB[0-9_]+]]: +; CHECK-LLC: le lr, [[LOOP_HEADER]] +; CHECK-LLC-NOT: b [[LOOP_EXIT:\.LBB[0-9_]+]] +; CHECK-LLC: [[LOOP_EXIT:\.LBB[0-9_]+]]: + define i32 @do_inc1(i32 %n) { entry: %cmp7 = icmp eq i32 %n, 0 @@ -84,6 +102,16 @@ ; CHECK: [[LOOP_DEC]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[REM]], i32 1) ; CHECK: [[CMP:%[^ ]+]] = icmp ne i32 [[LOOP_DEC]], 0 ; CHECK: br i1 [[CMP]], label %while.body, label %while.end.loopexit + +; CHECK-LLC: do_inc2: +; CHECK-LLC-NOT: mov lr, +; CHECK-LLC: dls lr, +; CHECK-LLC-NOT: mov lr, +; CHECK-LLC: [[LOOP_HEADER:\.LBB[0-9._]+]]: +; CHECK-LLC: le lr, [[LOOP_HEADER]] +; CHECK-LLC-NOT: b [[LOOP_EXIT:\.LBB[0-9._]+]] +; CHECK-LLC: [[LOOP_EXIT:\.LBB[0-9_]+]]: + define i32 @do_inc2(i32 %n) { entry: %cmp7 = icmp sgt i32 %n, 0 @@ -127,6 +155,15 @@ ; CHECK: [[LOOP_DEC]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[REM]], i32 1) ; CHECK: [[CMP:%[^ ]+]] = icmp ne i32 [[LOOP_DEC]], 0 ; CHECK: br i1 [[CMP]], label %while.body, label %while.end.loopexit + +; CHECK-LLC: do_dec2 +; CHECK-LLC-NOT: mov lr, +; CHECK-LLC: dls lr, +; CHECK-LLC-NOT: mov lr, +; CHECK-LLC: [[LOOP_HEADER:\.LBB[0-9_]+]]: +; CHECK-LLC: le lr, [[LOOP_HEADER]] +; CHECK-LLC-NOT: b . 
+; CHECK-LLC: @ %while.end define i32 @do_dec2(i32 %n) { entry: %cmp6 = icmp sgt i32 %n, 0 Index: test/Transforms/HardwareLoops/ARM/structure.ll =================================================================== --- test/Transforms/HardwareLoops/ARM/structure.ll +++ test/Transforms/HardwareLoops/ARM/structure.ll @@ -1,4 +1,6 @@ ; RUN: opt -mtriple=thumbv8.1m.main-arm-none-eabi -hardware-loops -disable-arm-loloops=false %s -S -o - | FileCheck %s +; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -disable-arm-loloops=false %s -o - | FileCheck %s --check-prefix=CHECK-LLC +; RUN: opt -mtriple=thumbv8.1m.main -loop-unroll -unroll-remainder=false -S < %s | llc -mtriple=thumbv8.1m.main -disable-arm-loloops=false | FileCheck %s --check-prefix=CHECK-UNROLL ; CHECK-LABEL: early_exit ; CHECK-NOT: llvm.set.loop.iterations @@ -43,6 +45,16 @@ ; CHECK-NOT: [[LOOP_DEC1:%[^ ]+]] = call i1 @llvm.loop.decrement.i32(i32 1) ; CHECK-NOT: br i1 [[LOOP_DEC1]], label %while.cond1.preheader.us, label %while.end7 + +; CHECK-LLC: nested: +; CHECK-LLC-NOT: mov lr, r1 +; CHECK-LLC: dls lr, r1 +; CHECK-LLC-NOT: mov lr, r1 +; CHECK-LLC: [[LOOP_HEADER:\.LBB[0-9._]+]]: +; CHECK-LLC: le lr, [[LOOP_HEADER]] +; CHECK-LLC-NOT: b [[LOOP_EXIT:\.LBB[0-9._]+]] +; CHECK-LLC: [[LOOP_EXIT:\.LBB[0-9._]+]]: + define void @nested(i32* nocapture %A, i32 %N) { entry: %cmp20 = icmp eq i32 %N, 0 @@ -210,6 +222,171 @@ ret void } +; CHECK-LABEL: search +; CHECK: for.body.preheader: +; CHECK: call void @llvm.set.loop.iterations.i32(i32 %N) +; CHECK: br label %for.body +; CHECK: for.body: +; CHECK: for.inc: +; CHECK: [[LOOP_DEC:%[^ ]+]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32 +; CHECK: [[CMP:%[^ ]+]] = icmp ne i32 [[LOOP_DEC]], 0 +; CHECK: br i1 [[CMP]], label %for.body, label %for.cond.cleanup +define i32 @search(i8* nocapture readonly %c, i32 %N) { +entry: + %cmp11 = icmp eq i32 %N, 0 + br i1 %cmp11, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + %found.0.lcssa = phi i32 [ 0, %entry ], [ %found.1, %for.inc ] + %spaces.0.lcssa = phi i32 [ 0, %entry ], [ %spaces.1, %for.inc ] + %sub = sub nsw i32 %found.0.lcssa, %spaces.0.lcssa + ret i32 %sub + +for.body: + %i.014 = phi i32 [ %inc3, %for.inc ], [ 0, %entry ] + %spaces.013 = phi i32 [ %spaces.1, %for.inc ], [ 0, %entry ] + %found.012 = phi i32 [ %found.1, %for.inc ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i8, i8* %c, i32 %i.014 + %0 = load i8, i8* %arrayidx, align 1 + switch i8 %0, label %for.inc [ + i8 108, label %sw.bb + i8 111, label %sw.bb + i8 112, label %sw.bb + i8 32, label %sw.bb1 + ] + +sw.bb: ; preds = %for.body, %for.body, %for.body + %inc = add nsw i32 %found.012, 1 + br label %for.inc + +sw.bb1: ; preds = %for.body + %inc2 = add nsw i32 %spaces.013, 1 + br label %for.inc + +for.inc: ; preds = %sw.bb, %sw.bb1, %for.body + %found.1 = phi i32 [ %found.012, %for.body ], [ %found.012, %sw.bb1 ], [ %inc, %sw.bb ] + %spaces.1 = phi i32 [ %spaces.013, %for.body ], [ %inc2, %sw.bb1 ], [ %spaces.013, %sw.bb ] + %inc3 = add nuw i32 %i.014, 1 + %exitcond = icmp eq i32 %inc3, %N + br i1 %exitcond, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: unroll_inc_int +; CHECK: call void @llvm.set.loop.iterations.i32(i32 %N) +; CHECK: call i32 @llvm.loop.decrement.reg.i32.i32.i32( + +; TODO: We should be able to support the unrolled loop body. 
+; CHECK-UNROLL-LABEL: unroll_inc_int: +; CHECK-UNROLL: [[PREHEADER:.LBB[0-9_]+]]: @ %for.body.preheader +; CHECK-UNROLL-NOT: dls +; CHECK-UNROLL: [[LOOP:.LBB[0-9_]+]]: @ %for.body +; CHECK-UNROLL-NOT: le lr, [[LOOP]] +; CHECK-UNROLL: bne [[LOOP]] +; CHECK-UNROLL: %for.body.epil.preheader +; CHECK-UNROLL: dls +; CHECK-UNROLL: %for.body.epil +; CHECK-UNROLL: le + +define void @unroll_inc_int(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) { +entry: + %cmp8 = icmp sgt i32 %N, 0 + br i1 %cmp8, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: + ret void + +for.body: + %i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.09 + %0 = load i32, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32, i32* %c, i32 %i.09 + %1 = load i32, i32* %arrayidx1, align 4 + %mul = mul nsw i32 %1, %0 + %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 %i.09 + store i32 %mul, i32* %arrayidx2, align 4 + %inc = add nuw nsw i32 %i.09, 1 + %exitcond = icmp eq i32 %inc, %N + br i1 %exitcond, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: unroll_inc_unsigned +; CHECK: call void @llvm.set.loop.iterations.i32(i32 %N) +; CHECK: call i32 @llvm.loop.decrement.reg.i32.i32.i32( + +; CHECK-LLC-LABEL: unroll_inc_unsigned: +; CHECK-LLC: dls lr, [[COUNT:r[0-9]+]] +; CHECK-LLC: le lr + +; TODO: We should be able to support the unrolled loop body. +; CHECK-UNROLL-LABEL: unroll_inc_unsigned: +; CHECK-UNROLL: [[PREHEADER:.LBB[0-9_]+]]: @ %for.body.preheader +; CHECK-UNROLL-NOT: dls +; CHECK-UNROLL: [[LOOP:.LBB[0-9_]+]]: @ %for.body +; CHECK-UNROLL-NOT: le lr, [[LOOP]] +; CHECK-UNROLL: bne [[LOOP]] +; CHECK-UNROLL: %for.body.epil.preheader +; CHECK-UNROLL: dls +; CHECK-UNROLL: %for.body.epil +; CHECK-UNROLL: le +define void @unroll_inc_unsigned(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) { +entry: + %cmp8 = icmp eq i32 %N, 0 + br i1 %cmp8, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void + +for.body: + %i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.09 + %0 = load i32, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32, i32* %c, i32 %i.09 + %1 = load i32, i32* %arrayidx1, align 4 + %mul = mul nsw i32 %1, %0 + %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 %i.09 + store i32 %mul, i32* %arrayidx2, align 4 + %inc = add nuw i32 %i.09, 1 + %exitcond = icmp eq i32 %inc, %N + br i1 %exitcond, label %for.cond.cleanup, label %for.body +} + +; CHECK-LABEL: unroll_dec_int +; CHECK: call void @llvm.set.loop.iterations.i32(i32 %N) +; CHECK: call i32 @llvm.loop.decrement.reg.i32.i32.i32( + +; TODO: An unnecessary register is being held to hold COUNT, lr should just +; be used instead. 
+; CHECK-LLC-LABEL: unroll_dec_int:
+; CHECK-LLC: dls lr, [[COUNT:r[0-9]+]]
+; CHECK-LLC: subs [[COUNT]], #1
+; CHECK-LLC: le lr
+
+; CHECK-UNROLL-LABEL: unroll_dec_int
+; CHECK-UNROLL: dls lr
+; CHECK-UNROLL: le lr
+; CHECK-UNROLL: dls lr
+; CHECK-UNROLL: le lr
+define void @unroll_dec_int(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+entry:
+  %cmp8 = icmp sgt i32 %N, 0
+  br i1 %cmp8, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ %dec, %for.body ], [ %N, %entry ]
+  %arrayidx = getelementptr inbounds i32, i32* %b, i32 %i.09
+  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %c, i32 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
+  %mul = mul nsw i32 %1, %0
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 %i.09
+  store i32 %mul, i32* %arrayidx2, align 4
+  %dec = add nsw i32 %i.09, -1
+  %cmp = icmp sgt i32 %dec, 0
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
 declare void @llvm.set.loop.iterations.i32(i32) #0
 declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
Index: test/Transforms/HardwareLoops/ARM/switch.mir
===================================================================
--- /dev/null
+++ test/Transforms/HardwareLoops/ARM/switch.mir
@@ -0,0 +1,198 @@
+# RUN: llc -mtriple=thumbv8.1m.main %s -run-pass=arm-finalize-loops -o - | FileCheck %s
+# CHECK: bb.1.for.body.preheader:
+# CHECK: $lr = t2DLS
+# CHECK-NOT: t2LoopDec
+# CHECK: bb.6.for.inc:
+# CHECK: $lr = t2LEUpdate renamable $lr, %bb.2
+
+--- |
+  target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+  target triple = "thumbv8.1m.main-unknown-unknown"
+
+  ; Function Attrs: norecurse nounwind readonly
+  define dso_local arm_aapcscc i32 @search(i8* nocapture readonly %c, i32 %N) local_unnamed_addr #0 {
+  entry:
+    %cmp11 = icmp eq i32 %N, 0
+    br i1 %cmp11, label %for.cond.cleanup, label %for.body.preheader
+
+  for.body.preheader:
+    call void @llvm.set.loop.iterations.i32(i32 %N)
+    br label %for.body
+
+  for.cond.cleanup:
+    %found.0.lcssa = phi i32 [ 0, %entry ], [ %found.1, %for.inc ]
+    %spaces.0.lcssa = phi i32 [ 0, %entry ], [ %spaces.1, %for.inc ]
+    %sub = sub nsw i32 %found.0.lcssa, %spaces.0.lcssa
+    ret i32 %sub
+
+  for.body:
+    %lsr.iv1 = phi i8* [ %c, %for.body.preheader ], [ %scevgep, %for.inc ]
+    %spaces.013 = phi i32 [ %spaces.1, %for.inc ], [ 0, %for.body.preheader ]
+    %found.012 = phi i32 [ %found.1, %for.inc ], [ 0, %for.body.preheader ]
+    %0 = phi i32 [ %N, %for.body.preheader ], [ %3, %for.inc ]
+    %1 = load i8, i8* %lsr.iv1, align 1
+    %2 = zext i8 %1 to i32
+    switch i32 %2, label %for.inc [
+      i32 108, label %sw.bb
+      i32 111, label %sw.bb
+      i32 112, label %sw.bb
+      i32 32, label %sw.bb1
+    ]
+
+  sw.bb:
+    %inc = add nsw i32 %found.012, 1
+    br label %for.inc
+
+  sw.bb1:
+    %inc2 = add nsw i32 %spaces.013, 1
+    br label %for.inc
+
+  for.inc:
+    %found.1 = phi i32 [ %found.012, %for.body ], [ %found.012, %sw.bb1 ], [ %inc, %sw.bb ]
+    %spaces.1 = phi i32 [ %spaces.013, %for.body ], [ %inc2, %sw.bb1 ], [ %spaces.013, %sw.bb ]
+    %scevgep = getelementptr i8, i8* %lsr.iv1, i32 1
+    %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
+    %4 = icmp ne i32 %3, 0
+    br i1 %4, label %for.body, label %for.cond.cleanup
+  }
+
+  declare void @llvm.set.loop.iterations.i32(i32) #1
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
+  declare void @llvm.stackprotector(i8*, i8**) #2
+
+  attributes #0 = { norecurse nounwind readonly
"correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+ras,+soft-float,+strict-align,+thumb-mode,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8d16,-fp-armv8d16sp,-fp-armv8sp,-fp16,-fp16fml,-fp64,-fpregs,-fullfp16,-neon,-vfp2,-vfp2d16,-vfp2d16sp,-vfp2sp,-vfp3,-vfp3d16,-vfp3d16sp,-vfp3sp,-vfp4,-vfp4d16,-vfp4d16sp,-vfp4sp" "unsafe-fp-math"="false" "use-soft-float"="true" } + attributes #1 = { noduplicate nounwind } + attributes #2 = { nounwind } + +... +--- +name: search +alignment: 1 +exposesReturnsTwice: false +legalized: false +regBankSelected: false +selected: false +failedISel: false +tracksRegLiveness: true +hasWinCFI: false +registers: [] +liveins: + - { reg: '$r0', virtual-reg: '' } + - { reg: '$r1', virtual-reg: '' } +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 16 + offsetAdjustment: -8 + maxAlignment: 4 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + cvBytesOfCalleeSavedRegisters: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: [] +stack: + - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } + - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4, + stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true, + debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } +constants: [] +machineFunctionInfo: {} +body: | + bb.0.entry: + successors: %bb.1(0x30000000), %bb.3(0x50000000) + liveins: $r0, $r1, $r4, $r6, $lr + + $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r4, killed $r6, $r7, killed $lr + frame-setup CFI_INSTRUCTION def_cfa_offset 16 + frame-setup CFI_INSTRUCTION offset $lr, -4 + frame-setup CFI_INSTRUCTION offset $r7, -8 + frame-setup CFI_INSTRUCTION offset $r6, -12 + frame-setup CFI_INSTRUCTION offset $r4, -16 + $r7 = frame-setup t2ADDri $sp, 8, 14, $noreg, $noreg + frame-setup CFI_INSTRUCTION def_cfa $r7, 8 + t2CMPri $r1, 0, 14, $noreg, implicit-def $cpsr + t2Bcc %bb.1, 0, killed $cpsr + + bb.3.for.body.preheader: + successors: %bb.4(0x80000000) + liveins: $r0, $r1 + + $lr = tMOVr $r1, 14, $noreg + t2DoLoopStart killed $r1 + renamable $r1 = t2MOVi 0, 14, $noreg, $noreg + renamable $r12 = t2MOVi 1, 14, $noreg, $noreg + renamable $r2 = t2MOVi 0, 14, $noreg, $noreg + + bb.4.for.body: + successors: %bb.5(0x26666665), %bb.6(0x5999999b) + liveins: $lr, $r0, $r1, $r2, $r12 + + 
renamable $r3 = t2LDRBi12 renamable $r0, 0, 14, $noreg :: (load 1 from %ir.lsr.iv1) + renamable $r4 = t2SUBri renamable $r3, 108, 14, $noreg, $noreg + renamable $lr = t2LoopDec killed renamable $lr, 1 + t2CMPri renamable $r4, 4, 14, $noreg, implicit-def $cpsr + t2Bcc %bb.5, 8, killed $cpsr + + bb.6.for.body: + successors: %bb.7(0x6db6db6e), %bb.5(0x12492492) + liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r12 + + renamable $r4 = t2LSLrr renamable $r12, killed renamable $r4, 14, $noreg, $noreg + t2TSTri killed renamable $r4, 25, 14, $noreg, implicit-def $cpsr + t2Bcc %bb.5, 0, killed $cpsr + + bb.7.sw.bb: + successors: %bb.8(0x80000000) + liveins: $lr, $r0, $r1, $r2, $r12 + + renamable $r2 = nsw t2ADDri killed renamable $r2, 1, 14, $noreg, $noreg + t2B %bb.8, 14, $noreg + + bb.5.for.body: + successors: %bb.8(0x80000000) + liveins: $lr, $r0, $r1, $r2, $r3, $r12 + + t2CMPri killed renamable $r3, 32, 14, $noreg, implicit-def $cpsr + BUNDLE implicit-def dead $itstate, implicit-def $r1, implicit killed $r1, implicit killed $cpsr { + t2IT 0, 8, implicit-def $itstate + renamable $r1 = nsw t2ADDri killed renamable $r1, 1, 0, killed $cpsr, $noreg, implicit $r1, implicit internal killed $itstate + } + + bb.8.for.inc: + successors: %bb.4(0x7c000000), %bb.2(0x04000000) + liveins: $lr, $r0, $r1, $r2, $r12 + + renamable $r0 = t2ADDri killed renamable $r0, 1, 14, $noreg, $noreg + t2LoopEnd renamable $lr, %bb.4 + t2B %bb.2, 14, $noreg + + bb.2.for.cond.cleanup: + liveins: $r1, $r2 + + renamable $r0 = nsw t2SUBrr killed renamable $r2, killed renamable $r1, 14, $noreg, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r6, def $r7, def $pc, implicit killed $r0 + + bb.1: + renamable $r2 = t2MOVi 0, 14, $noreg, $noreg + renamable $r1 = t2MOVi 0, 14, $noreg, $noreg + renamable $r0 = nsw t2SUBrr killed renamable $r2, killed renamable $r1, 14, $noreg, $noreg + $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r6, def $r7, def $pc, implicit killed $r0 + +...