diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -233,6 +233,15 @@ cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time")); +// This is currently disabled by default due to PR51869 (the analysis might end +// up in an infinite loop). +static cl::opt<bool> ProveImplicationsOfDifferentTypeViaTruncation( + "scalar-evolution-prove-implications-via-truncation", cl::Hidden, + cl::init(false), + cl::desc("If we can prove that wider type values actually fit in the " + "narrow type, we truncate them and prove the implication in " + "narrow type.")); + //===----------------------------------------------------------------------===// // SCEV class definitions //===----------------------------------------------------------------------===// @@ -10647,7 +10656,8 @@ // For unsigned and equality predicates, try to prove that both found // operands fit into narrow unsigned range. If so, try to prove facts in // narrow types. - if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) { + if (ProveImplicationsOfDifferentTypeViaTruncation && + !CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) { auto *NarrowType = LHS->getType(); auto *WideType = FoundLHS->getType(); auto BitWidth = getTypeSizeInBits(NarrowType); diff --git a/llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll b/llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Analysis/ScalarEvolution/pr51869-scalar-evolution-prove-implications-via-truncation.ll @@ -0,0 +1,5636 @@ +; RUN: opt < %s -disable-output -passes=indvars + +; Do not timeout (and do not crash). 
+; Adding -scalar-evolution-prove-implications-via-truncation used to result +; in this test case hanging (or taking lots of time to run). + +%struct.s_2 = type { double, double, i24, { float, float } } + +@v_1 = external dso_local global { i16, i16 }, align 1 +@v_2 = external dso_local global { i16, i16 }*, align 1 +@v_3 = external dso_local global [6 x { i16, i16 }*], align 1 +@v_4 = external dso_local global i64, align 1 +@v_6 = external dso_local global i16, align 1 +@v_7 = external dso_local global i16, align 1 +@v_8 = external dso_local global i16*, align 1 +@v_9 = external dso_local global i16**, align 1 +@v_12 = external dso_local global i24, align 1 +@v_14 = external dso_local global i16, align 1 +@v_15 = external dso_local global { i24, i24 }, align 1 +@v_16 = external dso_local global i32, align 1 +@v_18 = external dso_local global [9 x { i24, i24 }], align 1 +@v_19 = external dso_local global { i64, i64 }, align 1 +@v_21 = external dso_local global { i64, i64 }**, align 1 +@v_22 = external dso_local global { float, float }, align 1 +@v_32 = external dso_local global i40, align 1 +@v_37 = external dso_local global { i24, i24 }, align 1 +@v_40 = external dso_local global { i16, i16 }, align 1 +@v_41 = external dso_local global { i16, i16 }*, align 1 +@v_43 = external dso_local global { i32, i32 }, align 1 +@v_47 = external dso_local global i32, align 1 +@v_48 = external dso_local global i16, align 1 +@v_57 = external dso_local global i16, align 1 +@v_58 = external dso_local global { i64, i64 }, align 1 +@v_64 = external dso_local global { i32, i32 }, align 1 +@v_65 = external dso_local global { i32, i32 }*, align 1 +@v_69 = external dso_local global i16, align 1 +@v_71 = external dso_local global i16, align 1 +@v_73 = external dso_local global i64, align 1 +@v_75 = external dso_local global i64, align 1 +@v_76 = external dso_local global i64, align 1 +@v_77 = external dso_local global { i32, i32 }, align 1 +@v_89 = external dso_local global i64, align 1 
+@v_92 = external dso_local global i64, align 1 +@v_95 = external dso_local global i64, align 1 +@v_97 = external dso_local global i64, align 1 +@v_98 = external dso_local global [40 x i64], align 1 +@v_99 = external dso_local global i64, align 1 +@v_100 = external dso_local global [40 x i64], align 1 +@v_101 = external dso_local global i64, align 1 +@v_102 = external dso_local global [40 x i64], align 1 +@v_104 = external dso_local global i32, align 1 +@v_110 = external dso_local global { i16, i16 }, align 1 +@v_111 = external dso_local global i16, align 1 +@v_118 = external dso_local global i32, align 1 +@v_119 = external dso_local global [6 x i32], align 1 +@v_120 = external dso_local global i32, align 1 +@v_122 = external dso_local constant i24, align 1 +@v_125 = external dso_local global i24, align 1 +@v_126 = external dso_local global i64, align 1 +@v_128 = external dso_local global i24, align 1 +@v_131 = external dso_local constant double, align 1 +@v_133 = external dso_local constant double, align 1 +@v_134 = external dso_local constant double, align 1 +@v_137 = external dso_local global i24, align 1 +@v_139 = external dso_local global i16, align 1 +@v_142 = external dso_local global i16, align 1 +@v_146 = external dso_local global { i24, i24 }, align 1 +@v_152 = external dso_local constant { float, float }, align 1 +@v_154 = external dso_local global { i16, i16 }, align 1 +@v_156 = external dso_local global i40, align 1 +@v_161 = external dso_local constant i24, align 1 +@v_163 = external dso_local constant i24, align 1 +@v_168 = external dso_local global i16, align 1 +@v_181 = external dso_local global i24, align 1 +@v_183 = external dso_local global i16, align 1 +@v_187 = external dso_local global { i64, i64 }, align 1 +@v_198 = external dso_local global i24, align 1 +@v_199 = external dso_local global [8 x i24], align 1 +@v_200 = external dso_local global i64, align 1 +@v_203 = external dso_local global i32, align 1 +@v_205 = external dso_local global 
i16, align 1 +@v_206 = external dso_local global i16, align 1 +@v_207 = external dso_local global double, align 1 +@v_208 = external dso_local global i16, align 1 +@v_213 = external dso_local global { float, float }*, align 1 +@v_214 = external dso_local global float, align 1 +@v_215 = external dso_local global i32, align 1 +@v_216 = external dso_local global i32, align 1 +@v_218 = external dso_local global i16, align 1 +@v_221 = external dso_local global i16***, align 1 +@v_222 = external dso_local global i64, align 1 +@v_224 = external dso_local global i64**, align 1 +@v_225 = external dso_local global i40, align 1 +@v_227 = external dso_local global [5 x { i16, i16 }], align 1 +@v_228 = external dso_local global i32, align 1 +@v_229 = external dso_local global { float, float }, align 1 +@v_230 = external dso_local constant i24, align 1 +@v_231 = external dso_local global { i32, i32 }, align 1 +@v_239 = external dso_local global i16, align 1 +@v_240 = external dso_local global i32, align 1 +@v_242 = external dso_local global i32, align 1 +@v_243 = external dso_local global i24, align 1 +@v_244 = external dso_local global i24, align 1 +@v_245 = external dso_local global i32, align 1 +@v_246 = external dso_local global float, align 1 +@v_247 = external dso_local global i16, align 1 +@v_248 = external dso_local global float, align 1 +@v_252 = external dso_local global float, align 1 +@v_253 = external dso_local global [3 x float], align 1 +@v_255 = external dso_local global i64, align 1 +@v_258 = external dso_local global float, align 1 +@v_263 = external dso_local global float, align 1 +@v_264 = external dso_local global float, align 1 +@v_266 = external dso_local global i24, align 1 +@v_268 = external dso_local global i64, align 1 +@v_269 = external dso_local global i16, align 1 +@v_278 = external dso_local global i16, align 1 +@v_279 = external dso_local global { i64, i64 }, align 1 +@v_285 = external dso_local global { i32, i32 }, align 1 +@v_286 = external 
dso_local global { i32, i32 }*, align 1 +@v_290 = external dso_local global i16, align 1 +@v_292 = external dso_local global i16, align 1 +@v_294 = external dso_local global i64, align 1 +@v_296 = external dso_local global i64, align 1 +@v_297 = external dso_local global i64, align 1 +@v_306 = external dso_local global { i64, i64 }, align 1 +@v_310 = external dso_local global i64, align 1 +@v_313 = external dso_local global i64, align 1 +@v_316 = external dso_local global i64, align 1 +@v_318 = external dso_local global i64, align 1 +@v_319 = external dso_local global [40 x i64], align 1 +@v_320 = external dso_local global i64, align 1 +@v_321 = external dso_local global [40 x i64], align 1 +@v_322 = external dso_local global i64, align 1 +@v_323 = external dso_local global [40 x i64], align 1 +@v_324 = external dso_local global i64, align 1 +@v_325 = external dso_local global [40 x i64], align 1 +@v_332 = external dso_local global { i24, i24 }, align 1 +@v_334 = external dso_local global { i16, i16 }, align 1 +@v_335 = external dso_local global i16, align 1 +@v_342 = external dso_local global i32, align 1 +@v_343 = external dso_local global [6 x i32], align 1 +@v_344 = external dso_local global i32, align 1 +@v_346 = external dso_local constant i24, align 1 +@v_349 = external dso_local global i24, align 1 +@v_350 = external dso_local global i64, align 1 +@v_352 = external dso_local global i24, align 1 +@v_355 = external dso_local constant double, align 1 +@v_357 = external dso_local constant double, align 1 +@v_358 = external dso_local constant double, align 1 +@v_361 = external dso_local global i24, align 1 +@v_363 = external dso_local global i16, align 1 +@v_366 = external dso_local global i16, align 1 +@v_367 = external dso_local global { i24, i24 }, align 1 +@v_372 = external dso_local global { i16, i16 }, align 1 +@v_378 = external dso_local global { i16, i16 }, align 1 +@v_380 = external dso_local global i40, align 1 +@v_385 = external dso_local constant 
i24, align 1 +@v_387 = external dso_local constant i24, align 1 +@v_392 = external dso_local global i16, align 1 +@v_399 = external dso_local global { i24, i24 }, align 1 +@v_403 = external dso_local global i24, align 1 +@v_405 = external dso_local global i16, align 1 +@v_417 = external dso_local global { i16, i16 }, align 1 +@v_420 = external dso_local global i24, align 1 +@v_421 = external dso_local global [8 x i24], align 1 +@v_422 = external dso_local global i64, align 1 +@v_425 = external dso_local global i32, align 1 +@v_427 = external dso_local global i16, align 1 +@v_428 = external dso_local global i64, align 1 +@v_429 = external dso_local global i24, align 1 +@v_430 = external dso_local global i40, align 1 +@v_431 = external dso_local global i40, align 1 +@v_432 = external dso_local global double, align 1 +@v_434 = external dso_local global i40, align 1 +@v_436 = external dso_local global i24, align 1 +@v_437 = external dso_local global [32 x i24], align 1 +@v_439 = external dso_local global { i24, i24 }, align 1 +@v_440 = external dso_local global float, align 1 +@v_442 = external dso_local global float**, align 1 +@v_443 = external dso_local global i32, align 1 +@v_444 = external dso_local global [2 x i32], align 1 +@v_445 = external dso_local global [4 x [2 x i32]], align 1 +@v_447 = external dso_local global double, align 1 +@v_448 = external dso_local global [64 x double], align 1 +@v_449 = external dso_local global double, align 1 +@v_450 = external dso_local global [64 x double], align 1 +@v_451 = external dso_local global double, align 1 +@v_452 = external dso_local global [64 x double], align 1 +@v_453 = external dso_local global double, align 1 +@v_454 = external dso_local global [64 x double], align 1 +@v_456 = external dso_local global i64, align 1 +@v_457 = external dso_local global i64*, align 1 +@v_459 = external dso_local global { i64, i64 }*, align 1 +@v_462 = external dso_local global { i64, i64 }, align 1 +@v_463 = external dso_local 
global { i64, i64 }*, align 1 +@v_465 = external dso_local global { i64, i64 }***, align 1 +@v_467 = external dso_local global { i16, i16 }, align 1 +@v_470 = external dso_local global i16, align 1 +@v_471 = external dso_local global float, align 1 +@v_472 = external dso_local global [28 x float], align 1 +@v_473 = external dso_local global float, align 1 +@v_474 = external dso_local global [28 x float], align 1 +@v_475 = external dso_local global float, align 1 +@v_476 = external dso_local global [28 x float], align 1 +@v_477 = external dso_local global i64, align 1 +@v_478 = external dso_local global i64*, align 1 +@v_483 = external dso_local global i64, align 1 +@v_484 = external dso_local global [7 x i64], align 1 +@v_485 = external dso_local global i32, align 1 +@v_486 = external dso_local global i32*, align 1 +@v_487 = external dso_local global i32**, align 1 +@v_488 = external dso_local global [6 x i32**], align 1 +@v_489 = external dso_local global { i64, i64 }, align 1 +@v_492 = external dso_local global [2 x { i64, i64 }*]*, align 1 +@v_494 = external dso_local global i40, align 1 +@v_495 = external dso_local global i40*, align 1 +@v_497 = external dso_local global i16, align 1 +@v_499 = external dso_local global i16**, align 1 +@v_500 = external dso_local global { double, double }, align 1 +@v_501 = external dso_local global { double, double }*, align 1 +@v_503 = external dso_local global { double, double }***, align 1 +@v_504 = external dso_local global i32, align 1 +@v_506 = external dso_local global i32**, align 1 +@v_507 = external dso_local global i16, align 1 +@v_508 = external dso_local global [1 x i16], align 1 +@v_509 = external dso_local global i16, align 1 +@v_510 = external dso_local global [1 x i16], align 1 +@v_511 = external dso_local global i16, align 1 +@v_512 = external dso_local global [1 x i16], align 1 +@v_513 = external dso_local global i16, align 1 +@v_514 = external dso_local global [1 x i16], align 1 +@v_515 = external dso_local 
constant { i32, i32 }, align 1 +@v_516 = external dso_local global i64, align 1 +@v_517 = external dso_local global [6 x i64], align 1 +@v_518 = external dso_local global [6 x i64]*, align 1 +@v_519 = external dso_local global double, align 1 +@v_520 = external dso_local global double*, align 1 +@v_522 = external dso_local global float, align 1 +@v_523 = external dso_local global [2 x float], align 1 +@v_524 = external dso_local constant double, align 1 +@v_528 = external dso_local global double****, align 1 +@v_530 = external dso_local global i16, align 1 +@v_531 = external dso_local global i16, align 1 +@v_532 = external dso_local global i16*, align 1 +@v_533 = external dso_local constant i24, align 1 +@v_535 = external dso_local global i24**, align 1 +@v_538 = external dso_local global [3 x { i24, i24 }], align 1 +@v_539 = external dso_local global double, align 1 +@v_540 = external dso_local global double*, align 1 +@v_541 = external dso_local global { i40, i40 }, align 1 +@v_542 = external dso_local global i16, align 1 +@v_543 = external dso_local global i16*, align 1 +@v_544 = external dso_local global i16, align 1 +@v_545 = external dso_local global i64, align 1 +@v_547 = external dso_local global i64**, align 1 +@v_548 = external dso_local global i32, align 1 +@v_550 = external dso_local global i40, align 1 +@v_552 = external dso_local global i40**, align 1 +@v_553 = external dso_local global float, align 1 +@v_556 = external dso_local global [6 x float**], align 1 +@v_558 = external dso_local global { i40, i40 }, align 1 +@v_561 = external dso_local global { i16, i16 }, align 1 +@v_564 = external dso_local global { i16, i16 }*, align 1 +@v_566 = external dso_local global [1 x { i16, i16 }], align 1 +@v_571 = external dso_local global { double, double }****, align 1 +@v_574 = external dso_local global { i16, i16 }*, align 1 +@v_577 = external dso_local global [2 x { double, double }], align 1 +@v_579 = external dso_local global [2 x { double, double }], 
align 1 +@v_581 = external dso_local global [2 x { double, double }], align 1 +@v_583 = external dso_local global [2 x { double, double }], align 1 +@v_584 = external dso_local global i24, align 1 +@v_585 = external dso_local global [6 x i24], align 1 +@v_589 = external dso_local global { i24, i24 }, align 1 +@v_592 = external dso_local global { i24, i24 }***, align 1 +@v_597 = external dso_local global [9 x { i24, i24 }*]*, align 1 +@v_599 = external dso_local global { i16, i16 }*, align 1 +@v_600 = external dso_local global i32, align 1 +@v_612 = external dso_local global i16, align 1 +@v_624 = external dso_local global double, align 1 +@v_626 = external dso_local global double**, align 1 +@v_629 = external dso_local global i24, align 1 +@v_631 = external dso_local global [4 x i24*], align 1 +@v_632 = external dso_local global i64, align 1 +@v_633 = external dso_local global i64*, align 1 +@v_636 = external dso_local global { i16, i16 }**, align 1 +@v_638 = external dso_local constant double, align 1 +@v_639 = external dso_local global double*, align 1 + +declare dso_local void @modify_checksum(double) + +; Function Attrs: argmemonly nofree nosync nounwind willreturn +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #0 + +; Function Attrs: argmemonly nofree nosync nounwind willreturn +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #0 + +; Function Attrs: nounwind +declare dso_local void @print_checksum() #1 + +; Function Attrs: nounwind readnone +declare i24 @llvm.cowabunga.sub.a24(i24, i24) #2 + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i24 @llvm.smul.fix.i24(i24, i24, i32 immarg) #3 + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i32 @llvm.smul.fix.i32(i32, i32, i32 immarg) #3 + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i40 @llvm.smul.fix.i40(i40, i40, i32 immarg) #3 + +; Function Attrs: nounwind readnone 
+declare i16 @llvm.cowabunga.add.i16(i16, i16) #2 + +; Function Attrs: nounwind readnone +declare i40 @llvm.cowabunga.sub.a40(i40, i40) #2 + +; Function Attrs: nounwind +define dso_local i16 @main() #1 { +entry: + %retval = alloca i16, align 1 + %v_11 = alloca i64, align 1 + %v_13 = alloca i24, align 1 + %v_211 = alloca { i64, i64 }, align 1 + %y0 = alloca i16, align 1 + %y011 = alloca i16, align 1 + %y1 = alloca i16, align 1 + %tmp = alloca i16, align 1 + %y123 = alloca i16, align 1 + %tmp24 = alloca i16, align 1 + %tmp34 = alloca { i64, i64 }, align 1 + %indirect-arg-temp = alloca { i24, i24 }, align 1 + %indirect-arg-temp35 = alloca { i64, i64 }, align 1 + %indirect-arg-temp36 = alloca { float, float }, align 1 + %indirect-arg-temp37 = alloca { float, float }, align 1 + %indirect-arg-temp38 = alloca { i64, i64 }, align 1 + %v_217 = alloca { i24, i24 }, align 1 + %indirect-arg-temp45 = alloca { i16, i16 }, align 1 + %indirect-arg-temp46 = alloca { float, float }, align 1 + %indirect-arg-temp47 = alloca { i32, i32 }, align 1 + %indirect-arg-temp48 = alloca { i40, i40 }, align 1 + %v_438 = alloca i16, align 1 + %v_446 = alloca { float, float }, align 1 + %v_loop_3 = alloca i16, align 1 + %v_455 = alloca double, align 1 + %v_460 = alloca i64, align 1 + %v_461 = alloca i32, align 1 + %y = alloca i32, align 1 + %tmp151 = alloca i32, align 1 + %v_466 = alloca double, align 1 + %v_468 = alloca i16, align 1 + %v_469 = alloca i32, align 1 + %y0168 = alloca i16, align 1 + %y0169 = alloca i16, align 1 + %y1170 = alloca i16, align 1 + %tmp171 = alloca i16, align 1 + %y1185 = alloca i16, align 1 + %y0186 = alloca i16, align 1 + %y1187 = alloca i16, align 1 + %tmp188 = alloca i16, align 1 + %tmp219 = alloca i16, align 1 + %v_loop_1 = alloca i32, align 1 + %v_479 = alloca i16, align 1 + %v_480 = alloca i32, align 1 + %v_481 = alloca { i24, i24 }, align 1 + %v_482 = alloca i64, align 1 + %y285 = alloca i64, align 1 + %y1286 = alloca i16, align 1 + %tmp287 = alloca i16, align 1 + 
%tmp298 = alloca i64, align 1 + %v_493 = alloca { i64, i64 }, align 1 + %v_496 = alloca i40, align 1 + %y0320 = alloca i16, align 1 + %y1321 = alloca i16, align 1 + %tmp322 = alloca i16, align 1 + %v_loop_2 = alloca i16, align 1 + %y0423 = alloca i64, align 1 + %y1424 = alloca i64, align 1 + %tmp425 = alloca i64, align 1 + %y460 = alloca i64, align 1 + %tmp462 = alloca i64, align 1 + %v_521 = alloca double, align 1 + %v_529 = alloca double*, align 1 + %v_536 = alloca i24, align 1 + %y0491 = alloca double, align 1 + %y1493 = alloca double, align 1 + %tmp495 = alloca double, align 1 + %v_549 = alloca double, align 1 + %v_557 = alloca float, align 1 + %v_559 = alloca { i40, i40 }, align 1 + %v_560 = alloca { i16, i16 }, align 1 + %v_562 = alloca i16, align 1 + %cleanup.dest.slot = alloca i32, align 1 + %v_572 = alloca { i16, i16 }, align 1 + %v_575 = alloca i16, align 1 + %v_loop_2541 = alloca i64, align 1 + %tmp572 = alloca { double, double }, align 1 + %tmp585 = alloca { double, double }, align 1 + %tmp599 = alloca { double, double }, align 1 + %tmp621 = alloca { double, double }, align 1 + %tmp649 = alloca { double, double }, align 1 + %tmp671 = alloca { double, double }, align 1 + %tmp703 = alloca { double, double }, align 1 + %tmp717 = alloca { double, double }, align 1 + %tmp737 = alloca { double, double }, align 1 + %v_586 = alloca i24, align 1 + %v_587 = alloca { i16, i16 }, align 1 + %v_588 = alloca i16, align 1 + %v_593 = alloca { float, float }, align 1 + %y0782 = alloca i64, align 1 + %y1783 = alloca i64, align 1 + %tmp784 = alloca i64, align 1 + %indirect-arg-temp803 = alloca { float, float }, align 1 + %indirect-arg-temp804 = alloca { i64, i64 }, align 1 + %v_623 = alloca { i40, i40 }, align 1 + %v_627 = alloca double, align 1 + %v_628 = alloca i24, align 1 + %v_637 = alloca { i16, i16 }, align 1 + %v_640 = alloca %struct.s_2, align 1 + %y0818 = alloca double, align 1 + %y1819 = alloca double, align 1 + %y0820 = alloca double, align 1 + %y1821 = alloca 
double, align 1 + %tmp822 = alloca double, align 1 + %tmp833 = alloca double, align 1 + %v_loop_0 = alloca i16, align 1 + %v_loop_0872 = alloca i16, align 1 + %v_loop_0885 = alloca i16, align 1 + %v_loop_0897 = alloca i16, align 1 + %v_loop_0913 = alloca i16, align 1 + %v_loop_0940 = alloca i16, align 1 + %v_loop_0968 = alloca i16, align 1 + %v_loop_0981 = alloca i16, align 1 + %v_loop_0994 = alloca i16, align 1 + %v_loop_01007 = alloca i16, align 1 + %v_loop_01019 = alloca i16, align 1 + %v_loop_01035 = alloca i16, align 1 + %v_loop_01049 = alloca i16, align 1 + %v_loop_01062 = alloca i16, align 1 + %v_loop_01074 = alloca i16, align 1 + %v_loop_11080 = alloca i16, align 1 + %v_loop_01097 = alloca i16, align 1 + %v_loop_01108 = alloca i16, align 1 + %v_loop_01119 = alloca i16, align 1 + %v_loop_01130 = alloca i16, align 1 + %v_loop_01144 = alloca i16, align 1 + %v_loop_01157 = alloca i16, align 1 + %v_loop_01170 = alloca i16, align 1 + %v_loop_01184 = alloca i16, align 1 + %v_loop_01198 = alloca i16, align 1 + %v_loop_01209 = alloca i16, align 1 + %v_loop_01220 = alloca i16, align 1 + %v_loop_01231 = alloca i16, align 1 + %v_loop_01243 = alloca i16, align 1 + %v_loop_01256 = alloca i16, align 1 + %v_loop_01274 = alloca i16, align 1 + store i16 0, i16* %retval, align 1 + %0 = load i64, i64* @v_4, align 1 + %rem = urem i64 %0, 6 + %idxprom = trunc i64 %rem to i32 + %idxprom.c = trunc i32 %idxprom to i16 + %1 = add i16 0, %idxprom.c + %arrayidx = getelementptr [6 x { i16, i16 }*], [6 x { i16, i16 }*]* @v_3, i32 0, i32 %idxprom + %2 = sub i16 6, %1 + %3 = icmp ult i16 6, %1 + %4 = icmp ult i16 %2, 1 + %5 = or i1 %3, %4 + br i1 %5, label %trap, label %6 + +6: ; preds = %entry + %7 = load { i16, i16 }*, { i16, i16 }** %arrayidx, align 1 + %.realp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %7, i32 0, i32 0 + %.real = load i16, i16* %.realp, align 1 + %.imagp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %7, i32 0, i32 1 + %.imag = load i16, i16* %.imagp, 
align 1 + store i16 %.real, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_1, i32 0, i32 0), align 1 + store i16 %.imag, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_1, i32 0, i32 1), align 1 + %8 = bitcast i64* %v_11 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %8) #1 + store i64 31, i64* %v_11, align 1 + %9 = load i16**, i16*** @v_9, align 1 + %10 = load i16*, i16** %9, align 1 + %v_11.promoted = load i64, i64* %v_11, align 1 + br label %for.inc + +for.inc: ; preds = %for.inc, %6 + %inc17576 = phi i64 [ %v_11.promoted, %6 ], [ %inc1, %for.inc ] + %11 = load i16, i16* @v_6, align 1 + %inc = add i16 %11, 1 + store i16 %inc, i16* @v_6, align 1 + store i16 %11, i16* %10, align 1 + %inc1 = add i64 %inc17576, 1 + %cmp = icmp slt i64 %inc1, 42 + br i1 %cmp, label %for.inc, label %for.end + +for.end: ; preds = %for.inc + %inc175.lcssa = phi i64 [ %inc1, %for.inc ] + store i64 %inc175.lcssa, i64* %v_11, align 1 + %12 = bitcast i64* %v_11 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %12) #1 + %13 = bitcast i24* %v_13 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %13) #1 + %14 = load i24, i24* @v_12, align 1 + store i24 %14, i24* %v_13, align 1 + %15 = load i16, i16* @v_14, align 1 + %resize = zext i16 %15 to i24 + %16 = load i24, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_15, i32 0, i32 0), align 1 + %17 = sub i24 %resize, %16 + %18 = icmp eq i24 6751329, %17 + br i1 %18, label %land.lhs.true, label %if.else + +land.lhs.true: ; preds = %for.end + %19 = load i32, i32* @v_16, align 1 + %cmp2 = icmp sge i32 561401595, %19 + br i1 %cmp2, label %if.then, label %if.else + +if.then: ; preds = %land.lhs.true + %20 = bitcast { i64, i64 }* %v_211 to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* %20) #1 + %.real3 = load i24, i24* getelementptr inbounds ([9 x { i24, i24 }], [9 x { i24, i24 }]* @v_18, i32 0, i32 7, i32 0), align 1 + %.imag4 = load i24, i24* getelementptr inbounds ([9 x { i24, i24 }], [9 x 
{ i24, i24 }]* @v_18, i32 0, i32 7, i32 1), align 1 + %21 = load { i64, i64 }**, { i64, i64 }*** @v_21, align 1 + %22 = load { i64, i64 }*, { i64, i64 }** %21, align 1 + %.realp5 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %22, i32 0, i32 0 + %.real6 = load i64, i64* %.realp5, align 1 + %.imagp7 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %22, i32 0, i32 1 + %.imag8 = load i64, i64* %.imagp7, align 1 + %v_22.real = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_22, i32 0, i32 0), align 1 + %v_22.imag = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_22, i32 0, i32 1), align 1 + %v_22.real9 = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_22, i32 0, i32 0), align 1 + %v_22.imag10 = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_22, i32 0, i32 1), align 1 + %23 = load i64, i64* @v_4, align 1 + %24 = bitcast i16* %y0 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %24) #1 + %25 = bitcast i16* %y011 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %25) #1 + store i16 -16382, i16* %y011, align 1 + %26 = bitcast i16* %y1 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %26) #1 + store i16 -16382, i16* %y1, align 1 + %27 = load i16, i16* %y011, align 1 + %cmp12 = icmp sgt i16 %27, 0 + br i1 %cmp12, label %land.lhs.true13, label %lor.lhs.false + +land.lhs.true13: ; preds = %if.then + %28 = load i16, i16* %y1, align 1 + %cmp14 = icmp sgt i16 %28, 0 + br i1 %cmp14, label %land.lhs.true15, label %cond.false + +land.lhs.true15: ; preds = %land.lhs.true13 + %29 = load i16, i16* %y011, align 1 + %30 = load i16, i16* %y1, align 1 + %sub = sub i16 32767, %30 + %cmp16 = icmp sgt i16 %29, %sub + br i1 %cmp16, label %lor.lhs.false26, label %lor.lhs.false + +lor.lhs.false: ; preds = %land.lhs.true15, %if.then + %31 = phi i16 [ %29, %land.lhs.true15 ], [ %27, %if.then ] + 
%cmp17 = icmp slt i16 %31, 0 + br i1 %cmp17, label %land.lhs.true18, label %cond.false + +land.lhs.true18: ; preds = %lor.lhs.false + %32 = load i16, i16* %y1, align 1 + %cmp19 = icmp slt i16 %32, 0 + br i1 %cmp19, label %land.lhs.true20, label %cond.false + +land.lhs.true20: ; preds = %land.lhs.true18 + %33 = load i16, i16* %y011, align 1 + %34 = load i16, i16* %y1, align 1 + %sub21 = sub i16 -32768, %34 + %cmp22 = icmp slt i16 %33, %sub21 + br i1 %cmp22, label %lor.lhs.false26, label %cond.false + +cond.false: ; preds = %land.lhs.true20, %land.lhs.true18, %lor.lhs.false, %land.lhs.true13 + %35 = load i16, i16* %y011, align 1 + %36 = load i16, i16* %y1, align 1 + %add = add i16 %35, %36 + br label %lor.lhs.false26 + +lor.lhs.false26: ; preds = %cond.false, %land.lhs.true20, %land.lhs.true15 + %cond = phi i16 [ %add, %cond.false ], [ 0, %land.lhs.true20 ], [ 0, %land.lhs.true15 ] + store i16 %cond, i16* %tmp, align 1 + %37 = bitcast i16* %y1 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %37) #1 + %38 = bitcast i16* %y011 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %38) #1 + store i16 %cond, i16* %y0, align 1 + %39 = bitcast i16* %y123 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %39) #1 + store i16 -16382, i16* %y123, align 1 + %cmp27 = icmp eq i16 %cond, -32768 + br i1 %cmp27, label %land.lhs.true28, label %cond.false31 + +land.lhs.true28: ; preds = %lor.lhs.false26 + %40 = load i16, i16* %y123, align 1 + %cmp29 = icmp eq i16 %40, -1 + br i1 %cmp29, label %cond.true30, label %cond.false31 + +cond.true30: ; preds = %land.lhs.true28 + %41 = load i16, i16* %y0, align 1 + br label %cond.end32 + +cond.false31: ; preds = %land.lhs.true28, %lor.lhs.false26 + %42 = load i16, i16* %y0, align 1 + %43 = load i16, i16* %y123, align 1 + %div = sdiv i16 %42, %43 + br label %cond.end32 + +cond.end32: ; preds = %cond.false31, %cond.true30 + %cond33 = phi i16 [ %41, %cond.true30 ], [ %div, %cond.false31 ] + store i16 %cond33, i16* %tmp24, align 1 + %44 
= bitcast i16* %y123 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %44) #1 + %45 = bitcast i16* %y0 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %45) #1 + %46 = load i16, i16* %tmp24, align 1 + %indirect-arg-temp.realp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %indirect-arg-temp, i32 0, i32 0 + %indirect-arg-temp.imagp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %indirect-arg-temp, i32 0, i32 1 + store i24 %.real3, i24* %indirect-arg-temp.realp, align 1 + store i24 %.imag4, i24* %indirect-arg-temp.imagp, align 1 + %indirect-arg-temp35.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %indirect-arg-temp35, i32 0, i32 0 + %indirect-arg-temp35.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %indirect-arg-temp35, i32 0, i32 1 + store i64 %.real6, i64* %indirect-arg-temp35.realp, align 1 + store i64 %.imag8, i64* %indirect-arg-temp35.imagp, align 1 + %indirect-arg-temp36.realp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp36, i32 0, i32 0 + %indirect-arg-temp36.imagp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp36, i32 0, i32 1 + store float %v_22.real, float* %indirect-arg-temp36.realp, align 1 + store float %v_22.imag, float* %indirect-arg-temp36.imagp, align 1 + %indirect-arg-temp37.realp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp37, i32 0, i32 0 + %indirect-arg-temp37.imagp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp37, i32 0, i32 1 + store float %v_22.real9, float* %indirect-arg-temp37.realp, align 1 + store float %v_22.imag10, float* %indirect-arg-temp37.imagp, align 1 + %indirect-arg-temp38.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %indirect-arg-temp38, i32 0, i32 0 + %indirect-arg-temp38.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %indirect-arg-temp38, i32 0, i32 1 + store i64 18014398509481982, i64* %indirect-arg-temp38.realp, align 1 + 
store i64 2097151, i64* %indirect-arg-temp38.imagp, align 1 + %tmp34.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp34, i32 0, i32 0 + %tmp34.real = load i64, i64* %tmp34.realp, align 1 + %tmp34.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %tmp34, i32 0, i32 1 + %tmp34.imag = load i64, i64* %tmp34.imagp, align 1 + %v_211.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %v_211, i32 0, i32 0 + %v_211.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %v_211, i32 0, i32 1 + store i64 %tmp34.real, i64* %v_211.realp, align 1 + store i64 %tmp34.imag, i64* %v_211.imagp, align 1 + %47 = load { float, float }*, { float, float }** @v_213, align 1 + %.imagp39 = getelementptr inbounds { float, float }, { float, float }* %47, i32 0, i32 1 + %48 = load float, float* %.imagp39, align 1 + store float %48, float* @v_214, align 1 + store double 0x408BB520593D3311, double* @v_207, align 1 + %49 = load i32, i32* @v_215, align 1 + %inc40 = add i32 %49, 1 + store i32 %inc40, i32* @v_215, align 1 + store i32 %49, i32* @v_216, align 1 + %50 = bitcast { i24, i24 }* %v_217 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %50) #1 + %.real41 = load i24, i24* getelementptr inbounds ([9 x { i24, i24 }], [9 x { i24, i24 }]* @v_18, i32 0, i32 7, i32 0), align 1 + %.imag42 = load i24, i24* getelementptr inbounds ([9 x { i24, i24 }], [9 x { i24, i24 }]* @v_18, i32 0, i32 7, i32 1), align 1 + %v_217.realp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %v_217, i32 0, i32 0 + %v_217.imagp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %v_217, i32 0, i32 1 + store i24 %.real41, i24* %v_217.realp, align 1 + store i24 %.imag42, i24* %v_217.imagp, align 1 + %51 = load i16***, i16**** @v_221, align 1 + %52 = load i16**, i16*** %51, align 1 + %53 = load i16*, i16** %52, align 1 + store volatile i16 2046, i16* %53, align 1 + %54 = load i64**, i64*** @v_224, align 1 + %55 = load i64*, i64** %54, align 1 + %56 = load i64, i64* %55, align 1 + 
%conv = uitofp i64 %56 to double + store double %conv, double* @v_207, align 1 + store i24 6869438, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_37, i32 0, i32 0), align 1 + store i24 -2747874, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_37, i32 0, i32 1), align 1 + %57 = bitcast { i24, i24 }* %v_217 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %57) #1 + %58 = bitcast { i64, i64 }* %v_211 to i8* + call void @llvm.lifetime.end.p0i8(i64 8, i8* %58) #1 + br label %if.end144 + +if.else: ; preds = %land.lhs.true, %for.end + %59 = load i40, i40* @v_225, align 1 + %60 = load i32, i32* @v_228, align 1 + %rem43 = urem i32 %60, 5 + %rem43.c = trunc i32 %rem43 to i16 + %arrayidx44.idx = mul i16 %rem43.c, 2 + %61 = add i16 0, %arrayidx44.idx + %arrayidx44 = getelementptr [5 x { i16, i16 }], [5 x { i16, i16 }]* @v_227, i32 0, i32 %rem43 + %arrayidx44.realp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %arrayidx44, i32 0, i32 0 + %62 = sub i16 10, %61 + %63 = icmp ult i16 10, %61 + %64 = icmp ult i16 %62, 1 + %65 = or i1 %63, %64 + br i1 %65, label %trap, label %66 + +66: ; preds = %if.else + %arrayidx44.real = load i16, i16* %arrayidx44.realp, align 1 + %67 = add i16 %61, 1 + %arrayidx44.imagp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %arrayidx44, i32 0, i32 1 + %68 = sub i16 10, %67 + %69 = icmp ult i16 10, %67 + %70 = icmp ult i16 %68, 1 + %71 = or i1 %69, %70 + br i1 %71, label %trap, label %72 + +72: ; preds = %66 + %arrayidx44.imag = load i16, i16* %arrayidx44.imagp, align 1 + %v_229.real = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_229, i32 0, i32 0), align 1 + %v_229.imag = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_229, i32 0, i32 1), align 1 + %73 = load volatile i24, i24* @v_230, align 1 + %v_231.real = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_231, i32 0, i32 0), align 1 + %v_231.imag = 
load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_231, i32 0, i32 1), align 1 + %indirect-arg-temp45.realp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %indirect-arg-temp45, i32 0, i32 0 + %indirect-arg-temp45.imagp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %indirect-arg-temp45, i32 0, i32 1 + store i16 %arrayidx44.real, i16* %indirect-arg-temp45.realp, align 1 + store i16 %arrayidx44.imag, i16* %indirect-arg-temp45.imagp, align 1 + %indirect-arg-temp46.realp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp46, i32 0, i32 0 + %indirect-arg-temp46.imagp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp46, i32 0, i32 1 + store float %v_229.real, float* %indirect-arg-temp46.realp, align 1 + store float %v_229.imag, float* %indirect-arg-temp46.imagp, align 1 + %indirect-arg-temp47.realp = getelementptr inbounds { i32, i32 }, { i32, i32 }* %indirect-arg-temp47, i32 0, i32 0 + %indirect-arg-temp47.imagp = getelementptr inbounds { i32, i32 }, { i32, i32 }* %indirect-arg-temp47, i32 0, i32 1 + store i32 %v_231.real, i32* %indirect-arg-temp47.realp, align 1 + store i32 %v_231.imag, i32* %indirect-arg-temp47.imagp, align 1 + %indirect-arg-temp48.realp = getelementptr inbounds { i40, i40 }, { i40, i40 }* %indirect-arg-temp48, i32 0, i32 0 + %indirect-arg-temp48.imagp = getelementptr inbounds { i40, i40 }, { i40, i40 }* %indirect-arg-temp48, i32 0, i32 1 + store i40 -281501259870, i40* %indirect-arg-temp48.realp, align 1 + store i40 -214975167849, i40* %indirect-arg-temp48.imagp, align 1 + %74 = bitcast i16* %v_438 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %74) #1 + %75 = load i16*, i16** @v_8, align 1 + %76 = load i16, i16* %75, align 1 + %inc49 = add i16 %76, 1 + store i16 %inc49, i16* %75, align 1 + store i16 %76, i16* %v_438, align 1 + %v_332.real = load i24, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_332, i32 0, i32 0), align 1 + %v_332.imag = 
load i24, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_332, i32 0, i32 1), align 1 + store i24 %v_332.real, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_439, i32 0, i32 0), align 1 + store i24 %v_332.imag, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_439, i32 0, i32 1), align 1 + %77 = load float**, float*** @v_442, align 1 + %78 = load float*, float** %77, align 1 + %79 = load float, float* %78, align 1 + store float %79, float* @v_264, align 1 + %v_367.real = load i24, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_367, i32 0, i32 0), align 1 + %v_367.imag = load i24, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_367, i32 0, i32 1), align 1 + store i24 %v_367.real, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_399, i32 0, i32 0), align 1 + store i24 %v_367.imag, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_399, i32 0, i32 1), align 1 + %80 = load i32, i32* @v_228, align 1 + %inc50 = add i32 %80, 1 + store i32 %inc50, i32* @v_228, align 1 + %rem51 = urem i32 %80, 2 + %arrayidx52.idx = mul i32 %rem51, 2 + %81 = add i32 12, %arrayidx52.idx + %arrayidx52 = getelementptr [2 x i32], [2 x i32]* getelementptr inbounds ([4 x [2 x i32]], [4 x [2 x i32]]* @v_445, i32 0, i32 3), i32 0, i32 %rem51 + %82 = sub i32 16, %81 + %83 = load i32, i32* %arrayidx52, align 1 + %dec = add i32 %83, -1 + %84 = sub i32 16, %81 + store i32 %dec, i32* %arrayidx52, align 1 + store i32 %dec, i32* @v_443, align 1 + %85 = bitcast { float, float }* %v_446 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %85) #1 + %v_446.realp = getelementptr inbounds { float, float }, { float, float }* %v_446, i32 0, i32 0 + %v_446.imagp = getelementptr inbounds { float, float }, { float, float }* %v_446, i32 0, i32 1 + store volatile float 0x4060998D80000000, float* %v_446.realp, align 1 + store volatile float 0xC08E386BE0000000, float* %v_446.imagp, align 1 + %86 = load double, double* @v_432, align 1 + 
%add53 = fadd double 0x3FCDB50000000000, %86 + %cmp54 = fcmp oge double %add53, 0x4059C93980000000 + br i1 %cmp54, label %if.then60, label %lor.lhs.false56 + +lor.lhs.false56: ; preds = %72 + %87 = load i32, i32* @v_16, align 1 + %tobool = icmp ne i32 %87, 0 + br i1 %tobool, label %land.lhs.true57, label %if.else143 + +land.lhs.true57: ; preds = %lor.lhs.false56 + %88 = load i32, i32* @v_443, align 1 + %inc58 = add i32 %88, 1 + store i32 %inc58, i32* @v_443, align 1 + %tobool59 = icmp ne i32 %inc58, 0 + br i1 %tobool59, label %if.then60, label %if.else143 + +if.then60: ; preds = %land.lhs.true57, %72 + %89 = bitcast i16* %v_loop_3 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %89) #1 + store i16 0, i16* %v_loop_3, align 1 + br label %for.body65 + +for.body65: ; preds = %for.inc140, %if.then60 + %90 = load i16, i16* %v_loop_3, align 1 + %idxprom66 = zext i16 %90 to i32 + %arrayidx67.idx = mul i16 %90, 4 + %arrayidx67 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom66 + %91 = sub i16 256, %arrayidx67.idx + %92 = icmp ult i16 256, %arrayidx67.idx + %93 = icmp ult i16 %91, 4 + %94 = or i1 %92, %93 + br i1 %94, label %trap.loopexit74, label %95 + +95: ; preds = %for.body65 + %96 = load double, double* %arrayidx67, align 1 + %97 = load i16, i16* %v_loop_3, align 1 + %idxprom68 = zext i16 %97 to i32 + %arrayidx69.idx = mul i16 %97, 4 + %arrayidx69 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom68 + %98 = sub i16 256, %arrayidx69.idx + %99 = icmp ult i16 256, %arrayidx69.idx + %100 = icmp ult i16 %98, 4 + %101 = or i1 %99, %100 + br i1 %101, label %trap.loopexit74, label %102 + +102: ; preds = %95 + %103 = load double, double* %arrayidx69, align 1 + %mul = fmul double %96, %103 + %104 = load i16, i16* %v_loop_3, align 1 + %idxprom70 = zext i16 %104 to i32 + %arrayidx71.idx = mul i16 %104, 4 + %arrayidx71 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom70 + %105 = sub i16 256, 
%arrayidx71.idx + %106 = icmp ult i16 256, %arrayidx71.idx + %107 = icmp ult i16 %105, 4 + %108 = or i1 %106, %107 + br i1 %108, label %trap.loopexit74, label %109 + +109: ; preds = %102 + %110 = load double, double* %arrayidx71, align 1 + %add72 = fadd double %mul, %110 + %111 = load i16, i16* %v_loop_3, align 1 + %idxprom73 = zext i16 %111 to i32 + %arrayidx74.idx = mul i16 %111, 4 + %arrayidx74 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom73 + %112 = sub i16 256, %arrayidx74.idx + %113 = icmp ult i16 256, %arrayidx74.idx + %114 = icmp ult i16 %112, 4 + %115 = or i1 %113, %114 + br i1 %115, label %trap.loopexit74, label %116 + +116: ; preds = %109 + store double %add72, double* %arrayidx74, align 1 + %117 = load i16, i16* %v_loop_3, align 1 + %idxprom75 = zext i16 %117 to i32 + %arrayidx76.idx = mul i16 %117, 4 + %arrayidx76 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom75 + %118 = sub i16 256, %arrayidx76.idx + %119 = icmp ult i16 256, %arrayidx76.idx + %120 = icmp ult i16 %118, 4 + %121 = or i1 %119, %120 + br i1 %121, label %trap.loopexit74, label %122 + +122: ; preds = %116 + %123 = load double, double* %arrayidx76, align 1 + %add77 = fadd double 0xC0F35C69A414F5F4, %123 + %mul78 = fmul double 0x4085FAEA69C3681F, %add77 + %124 = load i16, i16* %v_loop_3, align 1 + %idxprom79 = zext i16 %124 to i32 + %arrayidx80.idx = mul i16 %124, 4 + %arrayidx80 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom79 + %125 = sub i16 256, %arrayidx80.idx + %126 = icmp ult i16 256, %arrayidx80.idx + %127 = icmp ult i16 %125, 4 + %128 = or i1 %126, %127 + br i1 %128, label %trap.loopexit74, label %129 + +129: ; preds = %122 + store double %mul78, double* %arrayidx80, align 1 + %130 = load i16, i16* %v_loop_3, align 1 + %idxprom81 = zext i16 %130 to i32 + %arrayidx82.idx = mul i16 %130, 4 + %arrayidx82 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom81 + %131 = sub i16 256, 
%arrayidx82.idx + %132 = icmp ult i16 256, %arrayidx82.idx + %133 = icmp ult i16 %131, 4 + %134 = or i1 %132, %133 + br i1 %134, label %trap.loopexit74, label %135 + +135: ; preds = %129 + %136 = load double, double* %arrayidx82, align 1 + %137 = load i16, i16* %v_loop_3, align 1 + %idxprom83 = zext i16 %137 to i32 + %arrayidx84.idx = mul i16 %137, 4 + %arrayidx84 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom83 + %138 = sub i16 256, %arrayidx84.idx + %139 = icmp ult i16 256, %arrayidx84.idx + %140 = icmp ult i16 %138, 4 + %141 = or i1 %139, %140 + br i1 %141, label %trap.loopexit74, label %142 + +142: ; preds = %135 + %143 = load double, double* %arrayidx84, align 1 + %144 = load i16, i16* %v_loop_3, align 1 + %idxprom85 = zext i16 %144 to i32 + %arrayidx86.idx = mul i16 %144, 4 + %arrayidx86 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom85 + %145 = sub i16 256, %arrayidx86.idx + %146 = icmp ult i16 256, %arrayidx86.idx + %147 = icmp ult i16 %145, 4 + %148 = or i1 %146, %147 + br i1 %148, label %trap.loopexit74, label %149 + +149: ; preds = %142 + %150 = load double, double* %arrayidx86, align 1 + %add87 = fadd double %143, %150 + %sub88 = fsub double %136, %add87 + %151 = load i16, i16* %v_loop_3, align 1 + %idxprom89 = zext i16 %151 to i32 + %arrayidx90.idx = mul i16 %151, 4 + %arrayidx90 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom89 + %152 = sub i16 256, %arrayidx90.idx + %153 = icmp ult i16 256, %arrayidx90.idx + %154 = icmp ult i16 %152, 4 + %155 = or i1 %153, %154 + br i1 %155, label %trap.loopexit74, label %156 + +156: ; preds = %149 + store double %sub88, double* %arrayidx90, align 1 + %157 = load i16, i16* %v_loop_3, align 1 + %idxprom91 = zext i16 %157 to i32 + %arrayidx92.idx = mul i16 %157, 4 + %arrayidx92 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom91 + %158 = sub i16 256, %arrayidx92.idx + %159 = icmp ult i16 256, %arrayidx92.idx + %160 = 
icmp ult i16 %158, 4 + %161 = or i1 %159, %160 + br i1 %161, label %trap.loopexit74, label %162 + +162: ; preds = %156 + %163 = load double, double* %arrayidx92, align 1 + %164 = load i16, i16* %v_loop_3, align 1 + %idxprom93 = zext i16 %164 to i32 + %arrayidx94.idx = mul i16 %164, 4 + %arrayidx94 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom93 + %165 = sub i16 256, %arrayidx94.idx + %166 = icmp ult i16 256, %arrayidx94.idx + %167 = icmp ult i16 %165, 4 + %168 = or i1 %166, %167 + br i1 %168, label %trap.loopexit74, label %169 + +169: ; preds = %162 + %170 = load double, double* %arrayidx94, align 1 + %171 = load i16, i16* %v_loop_3, align 1 + %idxprom95 = zext i16 %171 to i32 + %arrayidx96.idx = mul i16 %171, 4 + %arrayidx96 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom95 + %172 = sub i16 256, %arrayidx96.idx + %173 = icmp ult i16 256, %arrayidx96.idx + %174 = icmp ult i16 %172, 4 + %175 = or i1 %173, %174 + br i1 %175, label %trap.loopexit74, label %176 + +176: ; preds = %169 + %177 = load double, double* %arrayidx96, align 1 + %sub97 = fsub double %170, %177 + %sub98 = fsub double %163, %sub97 + %178 = load i16, i16* %v_loop_3, align 1 + %idxprom99 = zext i16 %178 to i32 + %arrayidx100.idx = mul i16 %178, 4 + %arrayidx100 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom99 + %179 = sub i16 256, %arrayidx100.idx + %180 = icmp ult i16 256, %arrayidx100.idx + %181 = icmp ult i16 %179, 4 + %182 = or i1 %180, %181 + br i1 %182, label %trap.loopexit74, label %183 + +183: ; preds = %176 + store double %sub98, double* %arrayidx100, align 1 + %184 = load i16, i16* %v_loop_3, align 1 + %idxprom101 = zext i16 %184 to i32 + %arrayidx102.idx = mul i16 %184, 4 + %arrayidx102 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom101 + %185 = sub i16 256, %arrayidx102.idx + %186 = icmp ult i16 256, %arrayidx102.idx + %187 = icmp ult i16 %185, 4 + %188 = or i1 %186, %187 + br i1 
%188, label %trap.loopexit74, label %189 + +189: ; preds = %183 + %190 = load double, double* %arrayidx102, align 1 + %191 = load i16, i16* %v_loop_3, align 1 + %idxprom103 = zext i16 %191 to i32 + %arrayidx104.idx = mul i16 %191, 4 + %arrayidx104 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom103 + %192 = sub i16 256, %arrayidx104.idx + %193 = icmp ult i16 256, %arrayidx104.idx + %194 = icmp ult i16 %192, 4 + %195 = or i1 %193, %194 + br i1 %195, label %trap.loopexit74, label %196 + +196: ; preds = %189 + %197 = load double, double* %arrayidx104, align 1 + %mul105 = fmul double %190, %197 + %198 = load i16, i16* %v_loop_3, align 1 + %idxprom106 = zext i16 %198 to i32 + %arrayidx107.idx = mul i16 %198, 4 + %arrayidx107 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom106 + %199 = sub i16 256, %arrayidx107.idx + %200 = icmp ult i16 256, %arrayidx107.idx + %201 = icmp ult i16 %199, 4 + %202 = or i1 %200, %201 + br i1 %202, label %trap.loopexit74, label %203 + +203: ; preds = %196 + %204 = load double, double* %arrayidx107, align 1 + %add108 = fadd double %mul105, %204 + %sub109 = fsub double %add108, 0x4085FE7BC2016CBD + %205 = load i16, i16* %v_loop_3, align 1 + %idxprom110 = zext i16 %205 to i32 + %arrayidx111.idx = mul i16 %205, 4 + %arrayidx111 = getelementptr [64 x double], [64 x double]* @v_454, i32 0, i32 %idxprom110 + %206 = sub i16 256, %arrayidx111.idx + %207 = icmp ult i16 256, %arrayidx111.idx + %208 = icmp ult i16 %206, 4 + %209 = or i1 %207, %208 + br i1 %209, label %trap.loopexit74, label %210 + +210: ; preds = %203 + store double %sub109, double* %arrayidx111, align 1 + %211 = load i16, i16* %v_loop_3, align 1 + %idxprom112 = zext i16 %211 to i32 + %arrayidx113.idx = mul i16 %211, 4 + %arrayidx113 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom112 + %212 = sub i16 256, %arrayidx113.idx + %213 = icmp ult i16 256, %arrayidx113.idx + %214 = icmp ult i16 %212, 4 + %215 = or i1 
%213, %214 + br i1 %215, label %trap.loopexit74, label %216 + +216: ; preds = %210 + %217 = load double, double* %arrayidx113, align 1 + %add114 = fadd double %217, 0x4081365A73C84EB9 + %218 = load i16, i16* %v_loop_3, align 1 + %idxprom115 = zext i16 %218 to i32 + %arrayidx116.idx = mul i16 %218, 4 + %arrayidx116 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom115 + %219 = sub i16 256, %arrayidx116.idx + %220 = icmp ult i16 256, %arrayidx116.idx + %221 = icmp ult i16 %219, 4 + %222 = or i1 %220, %221 + br i1 %222, label %trap.loopexit74, label %223 + +223: ; preds = %216 + store double %add114, double* %arrayidx116, align 1 + %224 = load i16, i16* %v_loop_3, align 1 + %idxprom117 = zext i16 %224 to i32 + %arrayidx118.idx = mul i16 %224, 4 + %arrayidx118 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom117 + %225 = sub i16 256, %arrayidx118.idx + %226 = icmp ult i16 256, %arrayidx118.idx + %227 = icmp ult i16 %225, 4 + %228 = or i1 %226, %227 + br i1 %228, label %trap.loopexit74, label %229 + +229: ; preds = %223 + %230 = load double, double* %arrayidx118, align 1 + %231 = load i16, i16* %v_loop_3, align 1 + %idxprom119 = zext i16 %231 to i32 + %arrayidx120.idx = mul i16 %231, 4 + %arrayidx120 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom119 + %232 = sub i16 256, %arrayidx120.idx + %233 = icmp ult i16 256, %arrayidx120.idx + %234 = icmp ult i16 %232, 4 + %235 = or i1 %233, %234 + br i1 %235, label %trap.loopexit74, label %236 + +236: ; preds = %229 + %237 = load double, double* %arrayidx120, align 1 + %sub121 = fsub double %230, %237 + %238 = load i16, i16* %v_loop_3, align 1 + %idxprom122 = zext i16 %238 to i32 + %arrayidx123.idx = mul i16 %238, 4 + %arrayidx123 = getelementptr [64 x double], [64 x double]* @v_454, i32 0, i32 %idxprom122 + %239 = sub i16 256, %arrayidx123.idx + %240 = icmp ult i16 256, %arrayidx123.idx + %241 = icmp ult i16 %239, 4 + %242 = or i1 %240, %241 + br i1 %242, 
label %trap.loopexit74, label %243 + +243: ; preds = %236 + store double %sub121, double* %arrayidx123, align 1 + %244 = load i16, i16* %v_loop_3, align 1 + %idxprom124 = zext i16 %244 to i32 + %arrayidx125.idx = mul i16 %244, 4 + %arrayidx125 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom124 + %245 = sub i16 256, %arrayidx125.idx + %246 = icmp ult i16 256, %arrayidx125.idx + %247 = icmp ult i16 %245, 4 + %248 = or i1 %246, %247 + br i1 %248, label %trap.loopexit74, label %249 + +249: ; preds = %243 + %250 = load double, double* %arrayidx125, align 1 + %251 = load i16, i16* %v_loop_3, align 1 + %idxprom126 = zext i16 %251 to i32 + %arrayidx127.idx = mul i16 %251, 4 + %arrayidx127 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom126 + %252 = sub i16 256, %arrayidx127.idx + %253 = icmp ult i16 256, %arrayidx127.idx + %254 = icmp ult i16 %252, 4 + %255 = or i1 %253, %254 + br i1 %255, label %trap.loopexit74, label %256 + +256: ; preds = %249 + %257 = load double, double* %arrayidx127, align 1 + %add128 = fadd double %250, %257 + %258 = load i16, i16* %v_loop_3, align 1 + %idxprom129 = zext i16 %258 to i32 + %arrayidx130.idx = mul i16 %258, 4 + %arrayidx130 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom129 + %259 = sub i16 256, %arrayidx130.idx + %260 = icmp ult i16 256, %arrayidx130.idx + %261 = icmp ult i16 %259, 4 + %262 = or i1 %260, %261 + br i1 %262, label %trap.loopexit74, label %263 + +263: ; preds = %256 + store double %add128, double* %arrayidx130, align 1 + %264 = load i16, i16* %v_loop_3, align 1 + %idxprom131 = zext i16 %264 to i32 + %arrayidx132.idx = mul i16 %264, 4 + %arrayidx132 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom131 + %265 = sub i16 256, %arrayidx132.idx + %266 = icmp ult i16 256, %arrayidx132.idx + %267 = icmp ult i16 %265, 4 + %268 = or i1 %266, %267 + br i1 %268, label %trap.loopexit74, label %269 + +269: ; preds = %263 + store double 
0x4081887B7A0803DA, double* %arrayidx132, align 1 + %270 = load i16, i16* %v_loop_3, align 1 + %idxprom133 = zext i16 %270 to i32 + %arrayidx134.idx = mul i16 %270, 4 + %arrayidx134 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom133 + %271 = sub i16 256, %arrayidx134.idx + %272 = icmp ult i16 256, %arrayidx134.idx + %273 = icmp ult i16 %271, 4 + %274 = or i1 %272, %273 + br i1 %274, label %trap.loopexit74, label %275 + +275: ; preds = %269 + %276 = load double, double* %arrayidx134, align 1 + %277 = load i16, i16* %v_loop_3, align 1 + %idxprom135 = zext i16 %277 to i32 + %arrayidx136.idx = mul i16 %277, 4 + %arrayidx136 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom135 + %278 = sub i16 256, %arrayidx136.idx + %279 = icmp ult i16 256, %arrayidx136.idx + %280 = icmp ult i16 %278, 4 + %281 = or i1 %279, %280 + br i1 %281, label %trap.loopexit74, label %282 + +282: ; preds = %275 + %283 = load double, double* %arrayidx136, align 1 + %sub137 = fsub double %276, %283 + %284 = load i16, i16* %v_loop_3, align 1 + %idxprom138 = zext i16 %284 to i32 + %arrayidx139.idx = mul i16 %284, 4 + %arrayidx139 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom138 + %285 = sub i16 256, %arrayidx139.idx + %286 = icmp ult i16 256, %arrayidx139.idx + %287 = icmp ult i16 %285, 4 + %288 = or i1 %286, %287 + br i1 %288, label %trap.loopexit74, label %for.inc140 + +for.inc140: ; preds = %282 + store double %sub137, double* %arrayidx139, align 1 + %289 = load i16, i16* %v_loop_3, align 1 + %inc141 = add i16 %289, 1 + store i16 %inc141, i16* %v_loop_3, align 1 + %cmp62 = icmp ult i16 %inc141, 64 + br i1 %cmp62, label %for.body65, label %for.end142 + +for.end142: ; preds = %for.inc140 + %290 = bitcast i16* %v_loop_3 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %290) #1 + %291 = bitcast double* %v_455 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %291) #1 + %292 = load double, double* @v_453, align 1 + 
store double %292, double* %v_455, align 1 + %293 = bitcast double* %v_455 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %293) #1 + br label %if.end + +if.else143: ; preds = %land.lhs.true57, %lor.lhs.false56 + %294 = load i16, i16* @v_6, align 1 + store i16 %294, i16* %v_438, align 1 + br label %if.end + +if.end: ; preds = %if.else143, %for.end142 + %295 = bitcast { float, float }* %v_446 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %295) #1 + %296 = bitcast i16* %v_438 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %296) #1 + br label %if.end144 + +if.end144: ; preds = %if.end, %cond.end32 + %v_77.real = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_77, i32 0, i32 0), align 1 + %v_77.imag = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_77, i32 0, i32 1), align 1 + store i32 %v_77.real, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_285, i32 0, i32 0), align 1 + store i32 %v_77.imag, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_285, i32 0, i32 1), align 1 + %297 = load i40, i40* @v_225, align 1 + %298 = call i40 @llvm.smul.fix.i40(i40 196608, i40 %297, i32 31) + %299 = load i40, i40* @v_225, align 1 + %300 = call i40 @llvm.smul.fix.i40(i40 %298, i40 %299, i32 31) + %301 = sub i40 %300, 365730463744 + %302 = sub i40 %301, 166908119 + store i40 %302, i40* @v_32, align 1 + store volatile i64* @v_89, i64** @v_457, align 1 + %303 = bitcast i64* %v_460 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %303) #1 + %304 = load { i64, i64 }*, { i64, i64 }** @v_459, align 1 + %.realp145 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %304, i32 0, i32 0 + %305 = load i64, i64* %.realp145, align 1 + store i64 %305, i64* %v_460, align 1 + store i16 510, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_40, i32 0, i32 0), align 1 + store i16 1022, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_40, i32 0, i32 1), align 1 + %306 = bitcast i32* 
%v_461 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %306) #1 + store i32 2, i32* %v_461, align 1 + %307 = bitcast i32* %y to i8* + %308 = load i32, i32* @v_245, align 1 + %309 = bitcast i32* %y to i8* + %310 = load { i64, i64 }***, { i64, i64 }**** @v_465, align 1 + %311 = bitcast double* %v_466 to i8* + %312 = bitcast i16* %v_468 to i8* + %313 = bitcast i32* %v_469 to i8* + %314 = bitcast i32* %v_469 to i8* + %315 = bitcast i16* %v_468 to i8* + %316 = bitcast double* %v_466 to i8* + %tmp151.promoted = load i32, i32* %tmp151, align 1 + %v_461.promoted = load i32, i32* %v_461, align 1 + br label %for.body150 + +for.body150: ; preds = %for.inc161, %if.end144 + %inc1627778 = phi i32 [ %v_461.promoted, %if.end144 ], [ %inc162, %for.inc161 ] + call void @llvm.lifetime.start.p0i8(i64 2, i8* %307) #1 + store i32 %308, i32* %y, align 1 + %317 = load i32, i32* %y, align 1 + %cmp152 = icmp eq i32 %317, 2147483647 + br i1 %cmp152, label %for.inc161, label %cond.false155 + +cond.false155: ; preds = %for.body150 + %318 = load i32, i32* %y, align 1 + %inc156 = add i32 %318, 1 + store i32 %inc156, i32* %y, align 1 + br label %for.inc161 + +for.inc161: ; preds = %cond.false155, %for.body150 + %cond158 = phi i32 [ %inc156, %cond.false155 ], [ 0, %for.body150 ] + call void @llvm.lifetime.end.p0i8(i64 2, i8* %309) #1 + store i32 %cond158, i32* @v_16, align 1 + %319 = load { i64, i64 }**, { i64, i64 }*** %310, align 1 + %320 = load { i64, i64 }*, { i64, i64 }** %319, align 1 + %.realp159 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %320, i32 0, i32 0 + %.imagp160 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %320, i32 0, i32 1 + store i64 2534617317417655939, i64* %.realp159, align 1 + store i64 2, i64* %.imagp160, align 1 + call void @llvm.lifetime.start.p0i8(i64 4, i8* %311) #1 + store double 0x406381BC3FF1DD7D, double* %v_466, align 1 + call void @llvm.lifetime.start.p0i8(i64 1, i8* %312) #1 + %321 = load volatile i16, i16* getelementptr inbounds ({ i16, 
i16 }, { i16, i16 }* @v_467, i32 0, i32 0), align 1 + store volatile i16 %321, i16* %v_468, align 1 + call void @llvm.lifetime.start.p0i8(i64 2, i8* %313) #1 + %322 = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_43, i32 0, i32 1), align 1 + store i32 %322, i32* %v_469, align 1 + call void @llvm.lifetime.end.p0i8(i64 2, i8* %314) #1 + call void @llvm.lifetime.end.p0i8(i64 1, i8* %315) #1 + call void @llvm.lifetime.end.p0i8(i64 4, i8* %316) #1 + %inc162 = add i32 %inc1627778, 1 + %cmp147 = icmp slt i32 %inc162, 19 + br i1 %cmp147, label %for.body150, label %for.end163 + +for.end163: ; preds = %for.inc161 + %inc16277.lcssa = phi i32 [ %inc162, %for.inc161 ] + %.lcssa = phi i32 [ %cond158, %for.inc161 ] + store i32 %.lcssa, i32* %tmp151, align 1 + store i32 %inc16277.lcssa, i32* %v_461, align 1 + %323 = bitcast i32* %v_461 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %323) #1 + %324 = load { i64, i64 }*, { i64, i64 }** @v_463, align 1 + %.realp164 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %324, i32 0, i32 0 + %.real165 = load i64, i64* %.realp164, align 1 + %.imagp166 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %324, i32 0, i32 1 + %.imag167 = load i64, i64* %.imagp166, align 1 + store i64 %.real165, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_462, i32 0, i32 0), align 1 + store i64 %.imag167, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_462, i32 0, i32 1), align 1 + %325 = load float, float* @v_264, align 1 + %326 = bitcast i16* %y0168 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %326) #1 + %327 = bitcast i16* %y0169 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %327) #1 + store i16 0, i16* %y0169, align 1 + %328 = bitcast i16* %y1170 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %328) #1 + store i16 1023, i16* %y1170, align 1 + %329 = load i16, i16* %y1170, align 1 + %cmp172 = icmp eq i16 %329, 0 + br i1 %cmp172, label %cond.true180, label 
%lor.lhs.false174 + +lor.lhs.false174: ; preds = %for.end163 + %330 = load i16, i16* %y0169, align 1 + %cmp175 = icmp eq i16 %330, -32768 + br i1 %cmp175, label %land.lhs.true177, label %cond.false181 + +land.lhs.true177: ; preds = %lor.lhs.false174 + %331 = load i16, i16* %y1170, align 1 + %cmp178 = icmp eq i16 %331, -1 + br i1 %cmp178, label %cond.true180, label %cond.false181 + +cond.true180: ; preds = %land.lhs.true177, %for.end163 + %332 = load i16, i16* %y0169, align 1 + br label %cond.end183 + +cond.false181: ; preds = %land.lhs.true177, %lor.lhs.false174 + %333 = load i16, i16* %y0169, align 1 + %334 = load i16, i16* %y1170, align 1 + %div182 = sdiv i16 %333, %334 + br label %cond.end183 + +cond.end183: ; preds = %cond.false181, %cond.true180 + %cond184 = phi i16 [ %332, %cond.true180 ], [ %div182, %cond.false181 ] + store i16 %cond184, i16* %tmp171, align 1 + %335 = bitcast i16* %y1170 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %335) #1 + %336 = bitcast i16* %y0169 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %336) #1 + %337 = load i16, i16* %tmp171, align 1 + store i16 %337, i16* %y0168, align 1 + %338 = bitcast i16* %y1185 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %338) #1 + %339 = bitcast i16* %y0186 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %339) #1 + %340 = load i16, i16* @v_470, align 1 + store i16 %340, i16* %y0186, align 1 + %341 = bitcast i16* %y1187 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %341) #1 + %342 = load i16, i16* @v_470, align 1 + store i16 %342, i16* %y1187, align 1 + %cmp189 = icmp sgt i16 %340, 0 + br i1 %cmp189, label %land.lhs.true191, label %lor.lhs.false198thread-pre-split + +land.lhs.true191: ; preds = %cond.end183 + %343 = load i16, i16* %y1187, align 1 + %cmp192 = icmp slt i16 %343, 0 + br i1 %cmp192, label %land.lhs.true194, label %lor.lhs.false198thread-pre-split + +land.lhs.true194: ; preds = %land.lhs.true191 + %344 = load i16, i16* %y0186, align 1 + %345 = load 
i16, i16* %y1187, align 1 + %add195 = add i16 32767, %345 + %cmp196 = icmp sgt i16 %344, %add195 + br i1 %cmp196, label %cond.end217, label %lor.lhs.false198 + +lor.lhs.false198thread-pre-split: ; preds = %land.lhs.true191, %cond.end183 + %.pr = load i16, i16* %y0186, align 1 + br label %lor.lhs.false198 + +lor.lhs.false198: ; preds = %lor.lhs.false198thread-pre-split, %land.lhs.true194 + %346 = phi i16 [ %.pr, %lor.lhs.false198thread-pre-split ], [ %344, %land.lhs.true194 ] + %cmp199 = icmp slt i16 %346, 0 + br i1 %cmp199, label %land.lhs.true201, label %lor.lhs.false208thread-pre-split + +land.lhs.true201: ; preds = %lor.lhs.false198 + %347 = load i16, i16* %y1187, align 1 + %cmp202 = icmp sgt i16 %347, 0 + br i1 %cmp202, label %land.lhs.true204, label %lor.lhs.false208thread-pre-split + +land.lhs.true204: ; preds = %land.lhs.true201 + %348 = load i16, i16* %y0186, align 1 + %349 = load i16, i16* %y1187, align 1 + %add205 = add i16 -32768, %349 + %cmp206 = icmp slt i16 %348, %add205 + br i1 %cmp206, label %cond.end217, label %lor.lhs.false208 + +lor.lhs.false208thread-pre-split: ; preds = %land.lhs.true201, %lor.lhs.false198 + %.pr36 = load i16, i16* %y0186, align 1 + br label %lor.lhs.false208 + +lor.lhs.false208: ; preds = %lor.lhs.false208thread-pre-split, %land.lhs.true204 + %350 = phi i16 [ %.pr36, %lor.lhs.false208thread-pre-split ], [ %348, %land.lhs.true204 ] + %cmp209 = icmp eq i16 %350, 0 + br i1 %cmp209, label %land.lhs.true211, label %cond.false215 + +land.lhs.true211: ; preds = %lor.lhs.false208 + %351 = load i16, i16* %y1187, align 1 + %cmp212 = icmp eq i16 %351, -32768 + br i1 %cmp212, label %cond.end217, label %cond.false215 + +cond.false215: ; preds = %land.lhs.true211, %lor.lhs.false208 + %352 = load i16, i16* %y0186, align 1 + %353 = load i16, i16* %y1187, align 1 + %sub216 = sub i16 %352, %353 + br label %cond.end217 + +cond.end217: ; preds = %cond.false215, %land.lhs.true211, %land.lhs.true204, %land.lhs.true194 + %cond218 = phi i16 [ 
%sub216, %cond.false215 ], [ 0, %land.lhs.true211 ], [ 0, %land.lhs.true204 ], [ 0, %land.lhs.true194 ] + store i16 %cond218, i16* %tmp188, align 1 + %354 = bitcast i16* %y1187 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %354) #1 + %355 = bitcast i16* %y0186 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %355) #1 + %356 = load i16, i16* %tmp188, align 1 + store i16 %356, i16* %y1185, align 1 + %357 = load i16, i16* %y0168, align 1 + %cmp220 = icmp sgt i16 %357, 0 + br i1 %cmp220, label %land.lhs.true222, label %lor.lhs.false229 + +land.lhs.true222: ; preds = %cond.end217 + %358 = load i16, i16* %y1185, align 1 + %cmp223 = icmp sgt i16 %358, 0 + br i1 %cmp223, label %land.lhs.true225, label %cond.false240 + +land.lhs.true225: ; preds = %land.lhs.true222 + %359 = load i16, i16* %y0168, align 1 + %360 = load i16, i16* %y1185, align 1 + %sub226 = sub i16 32767, %360 + %cmp227 = icmp sgt i16 %359, %sub226 + br i1 %cmp227, label %cond.end242, label %lor.lhs.false229 + +lor.lhs.false229: ; preds = %land.lhs.true225, %cond.end217 + %361 = phi i16 [ %359, %land.lhs.true225 ], [ %357, %cond.end217 ] + %cmp230 = icmp slt i16 %361, 0 + br i1 %cmp230, label %land.lhs.true232, label %cond.false240 + +land.lhs.true232: ; preds = %lor.lhs.false229 + %362 = load i16, i16* %y1185, align 1 + %cmp233 = icmp slt i16 %362, 0 + br i1 %cmp233, label %land.lhs.true235, label %cond.false240 + +land.lhs.true235: ; preds = %land.lhs.true232 + %363 = load i16, i16* %y0168, align 1 + %364 = load i16, i16* %y1185, align 1 + %sub236 = sub i16 -32768, %364 + %cmp237 = icmp slt i16 %363, %sub236 + br i1 %cmp237, label %cond.end242, label %cond.false240 + +cond.false240: ; preds = %land.lhs.true235, %land.lhs.true232, %lor.lhs.false229, %land.lhs.true222 + %365 = load i16, i16* %y0168, align 1 + %366 = load i16, i16* %y1185, align 1 + %add241 = add i16 %365, %366 + br label %cond.end242 + +cond.end242: ; preds = %cond.false240, %land.lhs.true235, %land.lhs.true225 + %cond243 = phi 
i16 [ %add241, %cond.false240 ], [ 0, %land.lhs.true235 ], [ 0, %land.lhs.true225 ] + store i16 %cond243, i16* %tmp219, align 1 + %367 = bitcast i16* %y1185 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %367) #1 + %368 = bitcast i16* %y0168 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %368) #1 + %369 = load i16, i16* %tmp219, align 1 + %conv244 = sitofp i16 %369 to float + %mul245 = fmul float %325, %conv244 + store float %mul245, float* @v_440, align 1 + %370 = bitcast i32* %v_loop_1 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %370) #1 + store i32 0, i32* %v_loop_1, align 1 + %v_loop_1.promoted = load i32, i32* %v_loop_1, align 1 + br label %for.body250 + +for.body250: ; preds = %for.inc274, %cond.end242 + %inc2757981 = phi i32 [ %v_loop_1.promoted, %cond.end242 ], [ %inc275, %for.inc274 ] + %arrayidx251.idx = mul i32 %inc2757981, 2 + %arrayidx251 = getelementptr [28 x float], [28 x float]* @v_472, i32 0, i32 %inc2757981 + %371 = sub i32 56, %arrayidx251.idx + %372 = icmp ult i32 56, %arrayidx251.idx + %373 = icmp ult i32 %371, 2 + %374 = or i1 %372, %373 + br i1 %374, label %trap.loopexit73, label %375 + +375: ; preds = %for.body250 + %376 = load float, float* %arrayidx251, align 1 + %arrayidx252 = getelementptr [28 x float], [28 x float]* @v_472, i32 0, i32 %inc2757981 + %377 = or i1 false, false + br i1 %377, label %trap.loopexit73, label %378 + +378: ; preds = %375 + %379 = load float, float* %arrayidx252, align 1 + %sub253 = fsub float %376, %379 + %arrayidx254 = getelementptr [28 x float], [28 x float]* @v_474, i32 0, i32 %inc2757981 + %380 = or i1 false, false + br i1 %380, label %trap.loopexit73, label %381 + +381: ; preds = %378 + store float %sub253, float* %arrayidx254, align 1 + %arrayidx255 = getelementptr [28 x float], [28 x float]* @v_474, i32 0, i32 %inc2757981 + %382 = or i1 false, false + br i1 %382, label %trap.loopexit73, label %383 + +383: ; preds = %381 + %384 = load float, float* %arrayidx255, align 1 + %sub256 = 
fsub float %384, 0xC120457D00000000 + %mul257 = fmul float 0x403384AE00000000, %sub256 + %arrayidx258 = getelementptr [28 x float], [28 x float]* @v_472, i32 0, i32 %inc2757981 + %385 = or i1 false, false + br i1 %385, label %trap.loopexit73, label %386 + +386: ; preds = %383 + store float %mul257, float* %arrayidx258, align 1 + %arrayidx259 = getelementptr [28 x float], [28 x float]* @v_474, i32 0, i32 %inc2757981 + %387 = or i1 false, false + br i1 %387, label %trap.loopexit73, label %388 + +388: ; preds = %386 + %389 = load float, float* %arrayidx259, align 1 + %arrayidx260 = getelementptr [28 x float], [28 x float]* @v_472, i32 0, i32 %inc2757981 + %390 = or i1 false, false + br i1 %390, label %trap.loopexit73, label %391 + +391: ; preds = %388 + %392 = load float, float* %arrayidx260, align 1 + %add261 = fadd float %392, 0x40809C82E0000000 + %mul262 = fmul float %389, %add261 + %sub263 = fsub float 0x408A2821C0000000, %mul262 + %arrayidx264 = getelementptr [28 x float], [28 x float]* @v_476, i32 0, i32 %inc2757981 + %393 = or i1 false, false + br i1 %393, label %trap.loopexit73, label %394 + +394: ; preds = %391 + store float %sub263, float* %arrayidx264, align 1 + %arrayidx265 = getelementptr [28 x float], [28 x float]* @v_474, i32 0, i32 %inc2757981 + %395 = or i1 false, false + br i1 %395, label %trap.loopexit73, label %396 + +396: ; preds = %394 + %397 = load float, float* %arrayidx265, align 1 + %arrayidx266 = getelementptr [28 x float], [28 x float]* @v_476, i32 0, i32 %inc2757981 + %398 = or i1 false, false + br i1 %398, label %trap.loopexit73, label %399 + +399: ; preds = %396 + %400 = load float, float* %arrayidx266, align 1 + %add267 = fadd float %397, %400 + %arrayidx268 = getelementptr [28 x float], [28 x float]* @v_472, i32 0, i32 %inc2757981 + %401 = or i1 false, false + br i1 %401, label %trap.loopexit73, label %402 + +402: ; preds = %399 + store float %add267, float* %arrayidx268, align 1 + %arrayidx269 = getelementptr [28 x float], [28 x 
float]* @v_474, i32 0, i32 %inc2757981 + %403 = or i1 false, false + br i1 %403, label %trap.loopexit73, label %404 + +404: ; preds = %402 + store float 0xC0961133C0000000, float* %arrayidx269, align 1 + %arrayidx270 = getelementptr [28 x float], [28 x float]* @v_472, i32 0, i32 %inc2757981 + %405 = or i1 false, false + br i1 %405, label %trap.loopexit73, label %406 + +406: ; preds = %404 + store float 0x4032AD0000000000, float* %arrayidx270, align 1 + %arrayidx271 = getelementptr [28 x float], [28 x float]* @v_476, i32 0, i32 %inc2757981 + %407 = or i1 false, false + br i1 %407, label %trap.loopexit73, label %408 + +408: ; preds = %406 + %409 = load float, float* %arrayidx271, align 1 + %add272 = fadd float %409, 0x4079782500000000 + %arrayidx273 = getelementptr [28 x float], [28 x float]* @v_474, i32 0, i32 %inc2757981 + %410 = or i1 false, false + br i1 %410, label %trap.loopexit73, label %for.inc274 + +for.inc274: ; preds = %408 + store float %add272, float* %arrayidx273, align 1 + %inc275 = add i32 %inc2757981, 1 + %cmp247 = icmp slt i32 %inc275, 28 + br i1 %cmp247, label %for.body250, label %for.end276 + +for.end276: ; preds = %for.inc274 + %inc27579.lcssa = phi i32 [ %inc275, %for.inc274 ] + store i32 %inc27579.lcssa, i32* %v_loop_1, align 1 + %411 = bitcast i32* %v_loop_1 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %411) #1 + store i64 -1, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_19, i32 0, i32 0), align 1 + store i64 -1763766102897696120, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_19, i32 0, i32 1), align 1 + %412 = load volatile i64*, i64** @v_457, align 1 + store volatile i64* %412, i64** @v_478, align 1 + %413 = bitcast i16* %v_479 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %413) #1 + %414 = load i16, i16* @v_6, align 1 + %dec277 = add i16 %414, -1 + store i16 %dec277, i16* @v_6, align 1 + store i16 %dec277, i16* %v_479, align 1 + %415 = bitcast i32* %v_480 to i8* + call void 
@llvm.lifetime.start.p0i8(i64 2, i8* %415) #1 + store i32 3, i32* %v_480, align 1 + %416 = bitcast { i24, i24 }* %v_481 to i8* + %v_481.realp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %v_481, i32 0, i32 0 + %v_481.imagp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %v_481, i32 0, i32 1 + %417 = bitcast i64* %v_482 to i8* + %418 = bitcast i64* %y285 to i8* + %419 = bitcast i16* %y1286 to i8* + %420 = load i16, i16* %v_479, align 1 + %421 = load i16, i16* @v_7, align 1 + %422 = bitcast i16* %y1286 to i8* + %423 = bitcast i64* %y285 to i8* + %424 = load i32**, i32*** getelementptr inbounds ([6 x i32**], [6 x i32**]* @v_488, i32 0, i32 0), align 1 + %425 = load i16, i16* @v_111, align 1 + %resize306 = sext i16 %425 to i32 + %upscale = shl i32 %resize306, 16 + %426 = call i32 @llvm.smul.fix.i32(i32 196608, i32 %upscale, i32 31) + %427 = call i32 @llvm.smul.fix.i32(i32 %426, i32 1264975872, i32 31) + %428 = bitcast i64* %v_482 to i8* + %429 = bitcast { i24, i24 }* %v_481 to i8* + %v_456.promoted = load i64, i64* @v_456, align 1 + %tmp287.promoted = load i16, i16* %tmp287, align 1 + %tmp298.promoted = load i64, i64* %tmp298, align 1 + %v_480.promoted = load i32, i32* %v_480, align 1 + br label %for.body282 + +for.body282: ; preds = %for.inc307, %for.end276 + %inc2838289 = phi i64 [ %v_456.promoted, %for.end276 ], [ %inc283, %for.inc307 ] + %430 = phi i64 [ %tmp298.promoted, %for.end276 ], [ %cond305, %for.inc307 ] + %inc3088688 = phi i32 [ %v_480.promoted, %for.end276 ], [ %inc308, %for.inc307 ] + call void @llvm.lifetime.start.p0i8(i64 4, i8* %416) #1 + store i24 -2755177, i24* %v_481.realp, align 1 + store i24 -6344874, i24* %v_481.imagp, align 1 + call void @llvm.lifetime.start.p0i8(i64 4, i8* %417) #1 + %inc283 = add i64 %inc2838289, 1 + %add284 = add i64 281474976710654, %inc2838289 + store i64 %add284, i64* %v_482, align 1 + call void @llvm.lifetime.start.p0i8(i64 4, i8* %418) #1 + call void @llvm.lifetime.start.p0i8(i64 1, i8* %419) #1 + store i16 
%420, i16* %y1286, align 1 + %431 = load i16, i16* %y1286, align 1 + %cmp288 = icmp eq i16 %431, 0 + br i1 %cmp288, label %cond.end293, label %cond.false291 + +cond.false291: ; preds = %for.body282 + %432 = load i16, i16* %y1286, align 1 + %div292 = udiv i16 %421, %432 + br label %cond.end293 + +cond.end293: ; preds = %cond.false291, %for.body282 + %cond294 = phi i16 [ %div292, %cond.false291 ], [ 32767, %for.body282 ] + call void @llvm.lifetime.end.p0i8(i64 1, i8* %422) #1 + %rem295 = urem i16 %cond294, 7 + %idxprom296 = zext i16 %rem295 to i32 + %arrayidx297.idx = mul i16 %rem295, 4 + %arrayidx297 = getelementptr [7 x i64], [7 x i64]* @v_484, i32 0, i32 %idxprom296 + %433 = sub i16 28, %arrayidx297.idx + %434 = icmp ult i16 28, %arrayidx297.idx + %435 = icmp ult i16 %433, 4 + %436 = or i1 %434, %435 + br i1 %436, label %trap.loopexit72, label %437 + +437: ; preds = %cond.end293 + %438 = load i64, i64* %arrayidx297, align 1 + store i64 %438, i64* %y285, align 1 + %439 = load i64, i64* %y285, align 1 + %cmp299 = icmp eq i64 %439, -9223372036854775808 + br i1 %cmp299, label %for.inc307, label %cond.false302 + +cond.false302: ; preds = %437 + %440 = load i64, i64* %y285, align 1 + %dec303 = add i64 %440, -1 + store i64 %dec303, i64* %y285, align 1 + br label %for.inc307 + +for.inc307: ; preds = %cond.false302, %437 + %cond305 = phi i64 [ %440, %cond.false302 ], [ 9223372036854775807, %437 ] + call void @llvm.lifetime.end.p0i8(i64 4, i8* %423) #1 + store i64 %cond305, i64* @v_92, align 1 + %441 = load i32*, i32** %424, align 1 + store i32* %441, i32** @v_486, align 1 + store i32 %427, i32* @v_120, align 1 + call void @llvm.lifetime.end.p0i8(i64 4, i8* %428) #1 + call void @llvm.lifetime.end.p0i8(i64 4, i8* %429) #1 + %inc308 = add i32 %inc3088688, 1 + %cmp279 = icmp slt i32 %inc308, 17 + br i1 %cmp279, label %for.body282, label %for.end309 + +for.end309: ; preds = %for.inc307 + %inc30886.lcssa = phi i32 [ %inc308, %for.inc307 ] + %.lcssa84 = phi i64 [ %cond305, 
%for.inc307 ] + %.lcssa83 = phi i16 [ %cond294, %for.inc307 ] + %inc28382.lcssa = phi i64 [ %inc283, %for.inc307 ] + store i64 %inc28382.lcssa, i64* @v_456, align 1 + store i16 %.lcssa83, i16* %tmp287, align 1 + store i64 %.lcssa84, i64* %tmp298, align 1 + store i32 %inc30886.lcssa, i32* %v_480, align 1 + %442 = bitcast i32* %v_480 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %442) #1 + %443 = load i24, i24* @v_349, align 1 + store i24 %443, i24* @v_420, align 1 + %444 = bitcast { i64, i64 }* %v_493 to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* %444) #1 + %445 = load [2 x { i64, i64 }*]*, [2 x { i64, i64 }*]** @v_492, align 1 + %446 = load i16, i16* @v_6, align 1 + %inc310 = add i16 %446, 1 + store i16 %inc310, i16* @v_6, align 1 + %rem311 = urem i16 %446, 2 + %idxprom312 = zext i16 %rem311 to i32 + %arrayidx313 = getelementptr [2 x { i64, i64 }*], [2 x { i64, i64 }*]* %445, i32 0, i32 %idxprom312 + %447 = load { i64, i64 }*, { i64, i64 }** %arrayidx313, align 1 + %.realp314 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %447, i32 0, i32 0 + %.real315 = load i64, i64* %.realp314, align 1 + %.imagp316 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %447, i32 0, i32 1 + %.imag317 = load i64, i64* %.imagp316, align 1 + %v_493.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %v_493, i32 0, i32 0 + %v_493.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %v_493, i32 0, i32 1 + store i64 %.real315, i64* %v_493.realp, align 1 + store i64 %.imag317, i64* %v_493.imagp, align 1 + %v_187.real = load i64, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_187, i32 0, i32 0), align 1 + %v_187.imag = load i64, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_187, i32 0, i32 1), align 1 + store i64 %v_187.real, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_306, i32 0, i32 0), align 1 + store i64 %v_187.imag, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_306, i32 0, i32 1), align 1 + 
%448 = bitcast i40* %v_496 to i8* + call void @llvm.lifetime.start.p0i8(i64 3, i8* %448) #1 + %449 = load i40*, i40** @v_495, align 1 + %450 = load i40, i40* %449, align 1 + store i40 %450, i40* %v_496, align 1 + %451 = load i32**, i32*** @v_487, align 1 + %452 = load i32*, i32** %451, align 1 + %453 = load i32, i32* %452, align 1 + %454 = load i32**, i32*** @v_487, align 1 + %455 = load i32*, i32** %454, align 1 + %456 = load i32, i32* %455, align 1 + %457 = icmp slt i32 %453, %456 + br i1 %457, label %if.then355, label %lor.lhs.false319 + +lor.lhs.false319: ; preds = %for.end309 + %458 = bitcast i16* %y0320 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %458) #1 + store i16 -510, i16* %y0320, align 1 + %459 = bitcast i16* %y1321 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %459) #1 + %460 = load i16, i16* @v_48, align 1 + store i16 %460, i16* %y1321, align 1 + %461 = load i16, i16* %y0320, align 1 + %cmp323 = icmp sgt i16 %461, 0 + br i1 %cmp323, label %land.lhs.true325, label %lor.lhs.false332 + +land.lhs.true325: ; preds = %lor.lhs.false319 + %462 = load i16, i16* %y1321, align 1 + %cmp326 = icmp slt i16 %462, 0 + br i1 %cmp326, label %land.lhs.true328, label %lor.lhs.false342thread-pre-split + +land.lhs.true328: ; preds = %land.lhs.true325 + %463 = load i16, i16* %y0320, align 1 + %464 = load i16, i16* %y1321, align 1 + %add329 = add i16 32767, %464 + %cmp330 = icmp sgt i16 %463, %add329 + br i1 %cmp330, label %cond.end351, label %lor.lhs.false332 + +lor.lhs.false332: ; preds = %land.lhs.true328, %lor.lhs.false319 + %465 = phi i16 [ %463, %land.lhs.true328 ], [ %461, %lor.lhs.false319 ] + %cmp333 = icmp slt i16 %465, 0 + br i1 %cmp333, label %land.lhs.true335, label %lor.lhs.false342thread-pre-split + +land.lhs.true335: ; preds = %lor.lhs.false332 + %466 = load i16, i16* %y1321, align 1 + %cmp336 = icmp sgt i16 %466, 0 + br i1 %cmp336, label %land.lhs.true338, label %lor.lhs.false342thread-pre-split + +land.lhs.true338: ; preds = 
%land.lhs.true335 + %467 = load i16, i16* %y0320, align 1 + %468 = load i16, i16* %y1321, align 1 + %add339 = add i16 -32768, %468 + %cmp340 = icmp slt i16 %467, %add339 + br i1 %cmp340, label %cond.end351, label %lor.lhs.false342 + +lor.lhs.false342thread-pre-split: ; preds = %land.lhs.true335, %lor.lhs.false332, %land.lhs.true325 + %.pr39 = load i16, i16* %y0320, align 1 + br label %lor.lhs.false342 + +lor.lhs.false342: ; preds = %lor.lhs.false342thread-pre-split, %land.lhs.true338 + %469 = phi i16 [ %.pr39, %lor.lhs.false342thread-pre-split ], [ %467, %land.lhs.true338 ] + %cmp343 = icmp eq i16 %469, 0 + br i1 %cmp343, label %land.lhs.true345, label %cond.false349 + +land.lhs.true345: ; preds = %lor.lhs.false342 + %470 = load i16, i16* %y1321, align 1 + %cmp346 = icmp eq i16 %470, -32768 + br i1 %cmp346, label %cond.end351, label %cond.false349 + +cond.false349: ; preds = %land.lhs.true345, %lor.lhs.false342 + %471 = load i16, i16* %y0320, align 1 + %472 = load i16, i16* %y1321, align 1 + %sub350 = sub i16 %471, %472 + br label %cond.end351 + +cond.end351: ; preds = %cond.false349, %land.lhs.true345, %land.lhs.true338, %land.lhs.true328 + %cond352 = phi i16 [ %sub350, %cond.false349 ], [ 0, %land.lhs.true345 ], [ 0, %land.lhs.true338 ], [ 0, %land.lhs.true328 ] + store i16 %cond352, i16* %tmp322, align 1 + %473 = bitcast i16* %y1321 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %473) #1 + %474 = bitcast i16* %y0320 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %474) #1 + %475 = load i16, i16* %tmp322, align 1 + %476 = load i16**, i16*** @v_499, align 1 + %477 = load i16*, i16** %476, align 1 + %478 = load volatile i16, i16* %477, align 1 + %cmp353 = icmp sle i16 %475, %478 + br i1 %cmp353, label %if.then355, label %if.else506 + +if.then355: ; preds = %cond.end351, %for.end309 + %479 = load { double, double }***, { double, double }**** @v_503, align 1 + %480 = load { double, double }**, { double, double }*** %479, align 1 + %481 = load { double, 
double }*, { double, double }** %480, align 1 + %.realp356 = getelementptr inbounds { double, double }, { double, double }* %481, i32 0, i32 0 + %.real357 = load double, double* %.realp356, align 1 + %.imagp358 = getelementptr inbounds { double, double }, { double, double }* %481, i32 0, i32 1 + %.imag359 = load double, double* %.imagp358, align 1 + store double %.real357, double* getelementptr inbounds ({ double, double }, { double, double }* @v_500, i32 0, i32 0), align 1 + store double %.imag359, double* getelementptr inbounds ({ double, double }, { double, double }* @v_500, i32 0, i32 1), align 1 + %482 = load i32**, i32*** @v_506, align 1 + %483 = load i32*, i32** %482, align 1 + %484 = load i32, i32* %483, align 1 + %dec360 = add i32 %484, -1 + store i32 %dec360, i32* %483, align 1 + store i32 %484, i32* @v_215, align 1 + %485 = bitcast i16* %v_loop_2 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %485) #1 + store i16 0, i16* %v_loop_2, align 1 + br label %for.body365 + +for.body365: ; preds = %for.inc420, %if.then355 + %486 = load i16, i16* %v_loop_2, align 1 + %idxprom366 = sext i16 %486 to i32 + %487 = sub i32 1, %idxprom366 + %488 = icmp ult i32 1, %idxprom366 + %489 = icmp ult i32 %487, 1 + %490 = or i1 %488, %489 + br i1 %490, label %trap.loopexit71, label %491 + +491: ; preds = %for.body365 + %492 = load i16, i16* %v_loop_2, align 1 + %idxprom368 = sext i16 %492 to i32 + %arrayidx369 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom368 + %493 = sub i32 1, %idxprom368 + %494 = icmp ult i32 1, %idxprom368 + %495 = icmp ult i32 %493, 1 + %496 = or i1 %494, %495 + br i1 %496, label %trap.loopexit71, label %497 + +497: ; preds = %491 + store i16 0, i16* %arrayidx369, align 1 + %498 = load i16, i16* %v_loop_2, align 1 + %idxprom370 = sext i16 %498 to i32 + %arrayidx371 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom370 + %499 = sub i32 1, %idxprom370 + %500 = icmp ult i32 1, %idxprom370 + %501 = icmp ult i32 %499, 1 
+ %502 = or i1 %500, %501 + br i1 %502, label %trap.loopexit71, label %503 + +503: ; preds = %497 + %504 = load i16, i16* %arrayidx371, align 1 + %505 = call i16 @llvm.smul.fix.i16(i16 3, i16 %504, i32 15) + %506 = add i16 31982, 20183 + %507 = call i16 @llvm.smul.fix.i16(i16 %505, i16 %506, i32 15) + %508 = load i16, i16* %v_loop_2, align 1 + %idxprom372 = sext i16 %508 to i32 + %arrayidx373 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom372 + %509 = sub i32 1, %idxprom372 + %510 = icmp ult i32 1, %idxprom372 + %511 = icmp ult i32 %509, 1 + %512 = or i1 %510, %511 + br i1 %512, label %trap.loopexit71, label %513 + +513: ; preds = %503 + store i16 %507, i16* %arrayidx373, align 1 + %514 = load i16, i16* %v_loop_2, align 1 + %idxprom374 = sext i16 %514 to i32 + %arrayidx375 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom374 + %515 = sub i32 1, %idxprom374 + %516 = icmp ult i32 1, %idxprom374 + %517 = icmp ult i32 %515, 1 + %518 = or i1 %516, %517 + br i1 %518, label %trap.loopexit71, label %519 + +519: ; preds = %513 + %520 = load i16, i16* %arrayidx375, align 1 + %521 = call i16 @llvm.smul.fix.i16(i16 -1, i16 %520, i32 15) + %522 = call i16 @llvm.smul.fix.i16(i16 -2, i16 %521, i32 15) + %523 = load i16, i16* %v_loop_2, align 1 + %idxprom376 = sext i16 %523 to i32 + %arrayidx377 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom376 + %524 = sub i32 1, %idxprom376 + %525 = icmp ult i32 1, %idxprom376 + %526 = icmp ult i32 %524, 1 + %527 = or i1 %525, %526 + br i1 %527, label %trap.loopexit71, label %528 + +528: ; preds = %519 + store i16 %522, i16* %arrayidx377, align 1 + %529 = load i16, i16* %v_loop_2, align 1 + %idxprom378 = sext i16 %529 to i32 + %arrayidx379 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom378 + %530 = sub i32 1, %idxprom378 + %531 = icmp ult i32 1, %idxprom378 + %532 = icmp ult i32 %530, 1 + %533 = or i1 %531, %532 + br i1 %533, label %trap.loopexit71, label %534 + +534: ; preds 
= %528 + %535 = load i16, i16* %arrayidx379, align 1 + %536 = call i16 @llvm.smul.fix.i16(i16 3, i16 %535, i32 15) + %537 = load i16, i16* %v_loop_2, align 1 + %idxprom380 = sext i16 %537 to i32 + %arrayidx381 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom380 + %538 = sub i32 1, %idxprom380 + %539 = icmp ult i32 1, %idxprom380 + %540 = icmp ult i32 %538, 1 + %541 = or i1 %539, %540 + br i1 %541, label %trap.loopexit71, label %542 + +542: ; preds = %534 + %543 = load i16, i16* %arrayidx381, align 1 + %544 = load i16, i16* %v_loop_2, align 1 + %idxprom382 = sext i16 %544 to i32 + %arrayidx383 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom382 + %545 = sub i32 1, %idxprom382 + %546 = icmp ult i32 1, %idxprom382 + %547 = icmp ult i32 %545, 1 + %548 = or i1 %546, %547 + br i1 %548, label %trap.loopexit71, label %549 + +549: ; preds = %542 + %550 = load i16, i16* %arrayidx383, align 1 + %551 = sub i16 %543, %550 + %552 = call i16 @llvm.smul.fix.i16(i16 %536, i16 %551, i32 15) + %553 = load i16, i16* %v_loop_2, align 1 + %idxprom384 = sext i16 %553 to i32 + %arrayidx385 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom384 + %554 = sub i32 1, %idxprom384 + %555 = icmp ult i32 1, %idxprom384 + %556 = icmp ult i32 %554, 1 + %557 = or i1 %555, %556 + br i1 %557, label %trap.loopexit71, label %558 + +558: ; preds = %549 + %559 = load i16, i16* %arrayidx385, align 1 + %560 = add i16 %552, %559 + %561 = load i16, i16* %v_loop_2, align 1 + %idxprom386 = sext i16 %561 to i32 + %arrayidx387 = getelementptr [1 x i16], [1 x i16]* @v_512, i32 0, i32 %idxprom386 + %562 = sub i32 1, %idxprom386 + %563 = icmp ult i32 1, %idxprom386 + %564 = icmp ult i32 %562, 1 + %565 = or i1 %563, %564 + br i1 %565, label %trap.loopexit71, label %566 + +566: ; preds = %558 + store i16 %560, i16* %arrayidx387, align 1 + %567 = load i16, i16* %v_loop_2, align 1 + %idxprom388 = sext i16 %567 to i32 + %arrayidx389 = getelementptr [1 x i16], [1 x i16]* 
@v_512, i32 0, i32 %idxprom388 + %568 = sub i32 1, %idxprom388 + %569 = icmp ult i32 1, %idxprom388 + %570 = icmp ult i32 %568, 1 + %571 = or i1 %569, %570 + br i1 %571, label %trap.loopexit71, label %572 + +572: ; preds = %566 + %573 = load i16, i16* %arrayidx389, align 1 + %574 = add i16 %573, 27131 + %575 = load i16, i16* %v_loop_2, align 1 + %idxprom390 = sext i16 %575 to i32 + %arrayidx391 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom390 + %576 = sub i32 1, %idxprom390 + %577 = icmp ult i32 1, %idxprom390 + %578 = icmp ult i32 %576, 1 + %579 = or i1 %577, %578 + br i1 %579, label %trap.loopexit71, label %580 + +580: ; preds = %572 + store i16 %574, i16* %arrayidx391, align 1 + %581 = load i16, i16* %v_loop_2, align 1 + %idxprom392 = sext i16 %581 to i32 + %arrayidx393 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom392 + %582 = sub i32 1, %idxprom392 + %583 = icmp ult i32 1, %idxprom392 + %584 = icmp ult i32 %582, 1 + %585 = or i1 %583, %584 + br i1 %585, label %trap.loopexit71, label %586 + +586: ; preds = %580 + %587 = load i16, i16* %arrayidx393, align 1 + %588 = sub i16 -258, %587 + %589 = load i16, i16* %v_loop_2, align 1 + %idxprom394 = sext i16 %589 to i32 + %arrayidx395 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom394 + %590 = sub i32 1, %idxprom394 + %591 = icmp ult i32 1, %idxprom394 + %592 = icmp ult i32 %590, 1 + %593 = or i1 %591, %592 + br i1 %593, label %trap.loopexit71, label %594 + +594: ; preds = %586 + store i16 %588, i16* %arrayidx395, align 1 + %595 = load i16, i16* %v_loop_2, align 1 + %idxprom396 = sext i16 %595 to i32 + %arrayidx397 = getelementptr [1 x i16], [1 x i16]* @v_512, i32 0, i32 %idxprom396 + %596 = sub i32 1, %idxprom396 + %597 = icmp ult i32 1, %idxprom396 + %598 = icmp ult i32 %596, 1 + %599 = or i1 %597, %598 + br i1 %599, label %trap.loopexit71, label %600 + +600: ; preds = %594 + %601 = load i16, i16* %arrayidx397, align 1 + %602 = load i16, i16* %v_loop_2, align 1 
+ %idxprom398 = sext i16 %602 to i32 + %arrayidx399 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom398 + %603 = sub i32 1, %idxprom398 + %604 = icmp ult i32 1, %idxprom398 + %605 = icmp ult i32 %603, 1 + %606 = or i1 %604, %605 + br i1 %606, label %trap.loopexit71, label %607 + +607: ; preds = %600 + %608 = load i16, i16* %arrayidx399, align 1 + %609 = call i16 @llvm.cowabunga.add.i16(i16 %601, i16 %608) + %610 = load i16, i16* %v_loop_2, align 1 + %idxprom400 = sext i16 %610 to i32 + %arrayidx401 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom400 + %611 = sub i32 1, %idxprom400 + %612 = icmp ult i32 1, %idxprom400 + %613 = icmp ult i32 %611, 1 + %614 = or i1 %612, %613 + br i1 %614, label %trap.loopexit71, label %615 + +615: ; preds = %607 + %616 = load i16, i16* %arrayidx401, align 1 + %617 = call i16 @llvm.cowabunga.add.i16(i16 %616, i16 -20718) + %618 = call i16 @llvm.cowabunga.sub.i16(i16 %609, i16 %617) + %619 = load i16, i16* %v_loop_2, align 1 + %idxprom402 = sext i16 %619 to i32 + %arrayidx403 = getelementptr [1 x i16], [1 x i16]* @v_514, i32 0, i32 %idxprom402 + %620 = sub i32 1, %idxprom402 + %621 = icmp ult i32 1, %idxprom402 + %622 = icmp ult i32 %620, 1 + %623 = or i1 %621, %622 + br i1 %623, label %trap.loopexit71, label %624 + +624: ; preds = %615 + store i16 %618, i16* %arrayidx403, align 1 + %625 = load i16, i16* %v_loop_2, align 1 + %idxprom404 = sext i16 %625 to i32 + %arrayidx405 = getelementptr [1 x i16], [1 x i16]* @v_512, i32 0, i32 %idxprom404 + %626 = sub i32 1, %idxprom404 + %627 = icmp ult i32 1, %idxprom404 + %628 = icmp ult i32 %626, 1 + %629 = or i1 %627, %628 + br i1 %629, label %trap.loopexit71, label %630 + +630: ; preds = %624 + %631 = load i16, i16* %arrayidx405, align 1 + %632 = call i16 @llvm.cowabunga.sub.i16(i16 %631, i16 25381) + %633 = load i16, i16* %v_loop_2, align 1 + %idxprom406 = sext i16 %633 to i32 + %arrayidx407 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom406 
+ %634 = sub i32 1, %idxprom406 + %635 = icmp ult i32 1, %idxprom406 + %636 = icmp ult i32 %634, 1 + %637 = or i1 %635, %636 + br i1 %637, label %trap.loopexit71, label %638 + +638: ; preds = %630 + store i16 %632, i16* %arrayidx407, align 1 + %639 = load i16, i16* %v_loop_2, align 1 + %idxprom408 = sext i16 %639 to i32 + %arrayidx409 = getelementptr [1 x i16], [1 x i16]* @v_512, i32 0, i32 %idxprom408 + %640 = sub i32 1, %idxprom408 + %641 = icmp ult i32 1, %idxprom408 + %642 = icmp ult i32 %640, 1 + %643 = or i1 %641, %642 + br i1 %643, label %trap.loopexit71, label %644 + +644: ; preds = %638 + %645 = load i16, i16* %arrayidx409, align 1 + %646 = call i16 @llvm.smul.fix.i16(i16 3, i16 %645, i32 15) + %647 = load i16, i16* %v_loop_2, align 1 + %idxprom410 = sext i16 %647 to i32 + %arrayidx411 = getelementptr [1 x i16], [1 x i16]* @v_512, i32 0, i32 %idxprom410 + %648 = sub i32 1, %idxprom410 + %649 = icmp ult i32 1, %idxprom410 + %650 = icmp ult i32 %648, 1 + %651 = or i1 %649, %650 + br i1 %651, label %trap.loopexit71, label %652 + +652: ; preds = %644 + %653 = load i16, i16* %arrayidx411, align 1 + %654 = call i16 @llvm.smul.fix.i16(i16 3, i16 %653, i32 15) + %655 = call i16 @llvm.smul.fix.i16(i16 %654, i16 -23940, i32 15) + %656 = call i16 @llvm.smul.fix.i16(i16 %646, i16 %655, i32 15) + %657 = load i16, i16* %v_loop_2, align 1 + %idxprom412 = sext i16 %657 to i32 + %arrayidx413 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom412 + %658 = sub i32 1, %idxprom412 + %659 = icmp ult i32 1, %idxprom412 + %660 = icmp ult i32 %658, 1 + %661 = or i1 %659, %660 + br i1 %661, label %trap.loopexit71, label %662 + +662: ; preds = %652 + store i16 %656, i16* %arrayidx413, align 1 + %663 = load i16, i16* %v_loop_2, align 1 + %idxprom414 = sext i16 %663 to i32 + %arrayidx415 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom414 + %664 = sub i32 1, %idxprom414 + %665 = icmp ult i32 1, %idxprom414 + %666 = icmp ult i32 %664, 1 + %667 = or i1 
%665, %666 + br i1 %667, label %trap.loopexit71, label %668 + +668: ; preds = %662 + %669 = load i16, i16* %arrayidx415, align 1 + %670 = load i16, i16* %v_loop_2, align 1 + %idxprom416 = sext i16 %670 to i32 + %arrayidx417 = getelementptr [1 x i16], [1 x i16]* @v_514, i32 0, i32 %idxprom416 + %671 = sub i32 1, %idxprom416 + %672 = icmp ult i32 1, %idxprom416 + %673 = icmp ult i32 %671, 1 + %674 = or i1 %672, %673 + br i1 %674, label %trap.loopexit71, label %675 + +675: ; preds = %668 + %676 = load i16, i16* %arrayidx417, align 1 + %677 = call i16 @llvm.cowabunga.add.i16(i16 %669, i16 %676) + %678 = call i16 @llvm.cowabunga.sub.i16(i16 16164, i16 %677) + %679 = load i16, i16* %v_loop_2, align 1 + %idxprom418 = sext i16 %679 to i32 + %arrayidx419 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom418 + %680 = sub i32 1, %idxprom418 + %681 = icmp ult i32 1, %idxprom418 + %682 = icmp ult i32 %680, 1 + %683 = or i1 %681, %682 + br i1 %683, label %trap.loopexit71, label %for.inc420 + +for.inc420: ; preds = %675 + store i16 %678, i16* %arrayidx419, align 1 + %684 = load i16, i16* %v_loop_2, align 1 + %inc421 = add i16 %684, 1 + store i16 %inc421, i16* %v_loop_2, align 1 + %cmp362 = icmp slt i16 %inc421, 1 + br i1 %cmp362, label %for.body365, label %for.end422 + +for.end422: ; preds = %for.inc420 + %685 = bitcast i16* %v_loop_2 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %685) #1 + %686 = bitcast i64* %y0423 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %686) #1 + store i64 -281474976710655, i64* %y0423, align 1 + %687 = bitcast i64* %y1424 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %687) #1 + store i64 -1023, i64* %y1424, align 1 + %688 = load i64, i64* %y0423, align 1 + %cmp426 = icmp sgt i64 %688, 0 + br i1 %cmp426, label %land.lhs.true428, label %lor.lhs.false435 + +land.lhs.true428: ; preds = %for.end422 + %689 = load i64, i64* %y1424, align 1 + %cmp429 = icmp sgt i64 %689, 0 + br i1 %cmp429, label %land.lhs.true431, 
label %cond.false446 + +land.lhs.true431: ; preds = %land.lhs.true428 + %690 = load i64, i64* %y0423, align 1 + %691 = load i64, i64* %y1424, align 1 + %sub432 = sub i64 9223372036854775807, %691 + %cmp433 = icmp sgt i64 %690, %sub432 + br i1 %cmp433, label %cond.end448, label %lor.lhs.false435 + +lor.lhs.false435: ; preds = %land.lhs.true431, %for.end422 + %692 = phi i64 [ %690, %land.lhs.true431 ], [ %688, %for.end422 ] + %cmp436 = icmp slt i64 %692, 0 + br i1 %cmp436, label %land.lhs.true438, label %cond.false446 + +land.lhs.true438: ; preds = %lor.lhs.false435 + %693 = load i64, i64* %y1424, align 1 + %cmp439 = icmp slt i64 %693, 0 + br i1 %cmp439, label %land.lhs.true441, label %cond.false446 + +land.lhs.true441: ; preds = %land.lhs.true438 + %694 = load i64, i64* %y0423, align 1 + %695 = load i64, i64* %y1424, align 1 + %sub442 = sub i64 -9223372036854775808, %695 + %cmp443 = icmp slt i64 %694, %sub442 + br i1 %cmp443, label %cond.end448, label %cond.false446 + +cond.false446: ; preds = %land.lhs.true441, %land.lhs.true438, %lor.lhs.false435, %land.lhs.true428 + %696 = load i64, i64* %y0423, align 1 + %697 = load i64, i64* %y1424, align 1 + %add447 = add i64 %696, %697 + br label %cond.end448 + +cond.end448: ; preds = %cond.false446, %land.lhs.true441, %land.lhs.true431 + %cond449 = phi i64 [ %add447, %cond.false446 ], [ 0, %land.lhs.true441 ], [ 0, %land.lhs.true431 ] + store i64 %cond449, i64* %tmp425, align 1 + %698 = bitcast i64* %y1424 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %698) #1 + %699 = bitcast i64* %y0423 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %699) #1 + %700 = load i64, i64* %tmp425, align 1 + %701 = load volatile i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_515, i32 0, i32 1), align 1 + %conv450 = sext i32 %701 to i64 + %cmp451 = icmp eq i64 %700, %conv450 + br i1 %cmp451, label %if.then459, label %lor.lhs.false453 + +lor.lhs.false453: ; preds = %cond.end448 + %702 = load volatile i24, i24* 
@v_122, align 1 + %703 = load volatile i24, i24* @v_122, align 1 + %mul454 = call i24 @llvm.smul.fix.i24(i24 3, i24 %703, i32 15) + %unsclear = and i24 %mul454, 8388607 + %704 = load volatile i24, i24* @v_122, align 1 + %mul455 = call i24 @llvm.smul.fix.i24(i24 %unsclear, i24 %704, i32 15) + %unsclear456 = and i24 %mul455, 8388607 + %705 = call i24 @llvm.cowabunga.sub.a24(i24 %702, i24 %unsclear456) + %unsclear457 = and i24 %705, 8388607 + %706 = load volatile i24, i24* @v_346, align 1 + %707 = icmp ugt i24 %unsclear457, %706 + br i1 %707, label %if.then459, label %if.else489 + +if.then459: ; preds = %lor.lhs.false453, %cond.end448 + %708 = bitcast i64* %y460 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %708) #1 + %709 = load [6 x i64]*, [6 x i64]** @v_518, align 1 + %arrayidx461 = getelementptr [6 x i64], [6 x i64]* %709, i32 0, i32 1 + %710 = load i64, i64* %arrayidx461, align 1 + store i64 %710, i64* %y460, align 1 + %cmp463 = icmp eq i64 %710, -9223372036854775808 + br i1 %cmp463, label %cond.end468, label %cond.false466 + +cond.false466: ; preds = %if.then459 + %711 = load i64, i64* %y460, align 1 + %dec467 = add i64 %711, -1 + store i64 %dec467, i64* %y460, align 1 + br label %cond.end468 + +cond.end468: ; preds = %cond.false466, %if.then459 + %cond469 = phi i64 [ %dec467, %cond.false466 ], [ 9223372036854775807, %if.then459 ] + store i64 %cond469, i64* %tmp462, align 1 + %712 = bitcast i64* %y460 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %712) #1 + %713 = load i64, i64* %tmp462, align 1 + store i64 %713, i64* @v_296, align 1 + %714 = bitcast double* %v_521 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %714) #1 + %715 = load double*, double** @v_520, align 1 + %716 = load double, double* %715, align 1 + store double %716, double* %v_521, align 1 + %717 = load float, float* @v_475, align 1 + %718 = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_43, i32 0, i32 0), align 1 + %rem470 = urem i32 %718, 2 + 
%arrayidx471.idx = mul i32 %rem470, 2 + %719 = add i32 0, %arrayidx471.idx + %arrayidx471 = getelementptr [2 x float], [2 x float]* @v_523, i32 0, i32 %rem470 + %720 = sub i32 4, %719 + store float %717, float* %arrayidx471, align 1 + %721 = bitcast double** %v_529 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %721) #1 + %722 = load double****, double***** @v_528, align 1 + %723 = load double***, double**** %722, align 1 + %724 = load double**, double*** %723, align 1 + %725 = load double*, double** %724, align 1 + store double* %725, double** %v_529, align 1 + %726 = load i16, i16* @v_530, align 1 + %dec472 = add i16 %726, -1 + store i16 %dec472, i16* @v_530, align 1 + %727 = load i16*, i16** @v_532, align 1 + store i16 %726, i16* %727, align 1 + %728 = bitcast i24* %v_536 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %728) #1 + %729 = load volatile i24, i24* @v_385, align 1 + %mul473 = call i24 @llvm.smul.fix.i24(i24 3, i24 %729, i32 15) + %unsclear474 = and i24 %mul473, 8388607 + %mul475 = call i24 @llvm.smul.fix.i24(i24 %unsclear474, i24 4190854, i32 15) + %unsclear476 = and i24 %mul475, 8388607 + %mul477 = call i24 @llvm.smul.fix.i24(i24 3, i24 %unsclear476, i32 15) + %unsclear478 = and i24 %mul477, 8388607 + %730 = load i24**, i24*** @v_535, align 1 + %731 = load i24*, i24** %730, align 1 + %732 = load volatile i24, i24* %731, align 1 + %mul479 = call i24 @llvm.smul.fix.i24(i24 %unsclear478, i24 %732, i32 15) + %unsclear480 = and i24 %mul479, 8388607 + store volatile i24 %unsclear480, i24* %v_536, align 1 + %733 = load { double, double }*, { double, double }** @v_501, align 1 + %.realp481 = getelementptr inbounds { double, double }, { double, double }* %733, i32 0, i32 0 + %.real482 = load double, double* %.realp481, align 1 + %.imagp483 = getelementptr inbounds { double, double }, { double, double }* %733, i32 0, i32 1 + %.imag484 = load double, double* %.imagp483, align 1 + store double %.real482, double* getelementptr inbounds ({ 
double, double }, { double, double }* @v_500, i32 0, i32 0), align 1 + store double %.imag484, double* getelementptr inbounds ({ double, double }, { double, double }* @v_500, i32 0, i32 1), align 1 + %v_279.real = load i64, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_279, i32 0, i32 0), align 1 + %v_279.imag = load i64, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_279, i32 0, i32 1), align 1 + store i64 %v_279.real, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_462, i32 0, i32 0), align 1 + store i64 %v_279.imag, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_462, i32 0, i32 1), align 1 + %734 = load i16*, i16** @v_8, align 1 + %735 = load i16, i16* %734, align 1 + %dec485 = add i16 %735, -1 + store i16 %dec485, i16* %734, align 1 + %rem486 = urem i16 %dec485, 3 + %idxprom487 = zext i16 %rem486 to i32 + %arrayidx488.idx = mul i32 %idxprom487, 4 + %736 = add i32 0, %arrayidx488.idx + %arrayidx488 = getelementptr [3 x { i24, i24 }], [3 x { i24, i24 }]* @v_538, i32 0, i32 %idxprom487 + %arrayidx488.realp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %arrayidx488, i32 0, i32 0 + %737 = add i32 %736, 2 + %arrayidx488.imagp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %arrayidx488, i32 0, i32 1 + %738 = sub i32 12, %736 + %739 = icmp ult i32 12, %736 + %740 = icmp ult i32 %738, 2 + %741 = or i1 %739, %740 + br i1 %741, label %trap, label %742 + +742: ; preds = %cond.end468 + store i24 5561208, i24* %arrayidx488.realp, align 1 + %743 = sub i32 12, %737 + %744 = icmp ult i32 12, %737 + %745 = icmp ult i32 %743, 2 + %746 = or i1 %744, %745 + br i1 %746, label %trap, label %747 + +747: ; preds = %742 + store i24 -2704478, i24* %arrayidx488.imagp, align 1 + %748 = bitcast i24* %v_536 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %748) #1 + %749 = bitcast double** %v_529 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %749) #1 + %750 = bitcast double* %v_521 to i8* + call void 
@llvm.lifetime.end.p0i8(i64 4, i8* %750) #1 + br label %if.end490 + +if.else489: ; preds = %lor.lhs.false453 + store double 0xC0752C37DB0CE698, double* @v_449, align 1 + br label %if.end490 + +if.end490: ; preds = %if.else489, %747 + %751 = bitcast double* %y0491 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %751) #1 + %752 = load double*, double** @v_540, align 1 + %753 = load double, double* %752, align 1 + %sub492 = fsub double 0x407FAAA428DC4236, %753 + store double %sub492, double* %y0491, align 1 + %754 = bitcast double* %y1493 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %754) #1 + %755 = load double*, double** @v_540, align 1 + %756 = load double, double* %755, align 1 + %757 = load double*, double** @v_540, align 1 + %758 = load double, double* %757, align 1 + %sub494 = fsub double %756, %758 + store double %sub494, double* %y1493, align 1 + %759 = load double, double* %y0491, align 1 + %cmp496 = fcmp oeq double %759, 0.000000e+00 + br i1 %cmp496, label %land.lhs.true498, label %cond.false502 + +land.lhs.true498: ; preds = %if.end490 + %760 = load double, double* %y1493, align 1 + %cmp499 = fcmp oeq double %760, 0.000000e+00 + br i1 %cmp499, label %cond.true501, label %cond.false502 + +cond.true501: ; preds = %land.lhs.true498 + %761 = load double, double* %y0491, align 1 + br label %cond.end504 + +cond.false502: ; preds = %land.lhs.true498, %if.end490 + %762 = load double, double* %y0491, align 1 + %763 = load double, double* %y1493, align 1 + %div503 = fdiv double %762, %763 + br label %cond.end504 + +cond.end504: ; preds = %cond.false502, %cond.true501 + %cond505 = phi double [ %761, %cond.true501 ], [ %div503, %cond.false502 ] + store double %cond505, double* %tmp495, align 1 + %764 = bitcast double* %y1493 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %764) #1 + %765 = bitcast double* %y0491 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %765) #1 + %766 = load double, double* %tmp495, align 1 + store double %766, 
double* @v_207, align 1 + store i64 511, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_279, i32 0, i32 0), align 1 + store i64 -17179869182, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_279, i32 0, i32 1), align 1 + store i40 440679391208, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_541, i32 0, i32 0), align 1 + store i40 155371197868, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_541, i32 0, i32 1), align 1 + br label %if.end509 + +if.else506: ; preds = %cond.end351 + %767 = load i16*, i16** @v_543, align 1 + %768 = load i16, i16* %767, align 1 + %inc507 = add i16 %768, 1 + store i16 %inc507, i16* %767, align 1 + %mul508 = mul i16 %inc507, -16383 + store i16 %mul508, i16* @v_544, align 1 + %769 = load i64**, i64*** @v_547, align 1 + %770 = load i64*, i64** %769, align 1 + store i64 -6532667314313983100, i64* %770, align 1 + br label %if.end509 + +if.end509: ; preds = %if.else506, %cond.end504 + store volatile i24 3278651, i24* @v_266, align 1 + %771 = load { i32, i32 }*, { i32, i32 }** @v_65, align 1 + store { i32, i32 }* %771, { i32, i32 }** @v_286, align 1 + %772 = bitcast double* %v_549 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %772) #1 + %773 = load double, double* @v_519, align 1 + %mul510 = call i32 @llvm.smul.fix.i32(i32 196608, i32 1358991282, i32 31) + %unsclear511 = and i32 %mul510, 2147483647 + %mul512 = call i32 @llvm.smul.fix.i32(i32 %unsclear511, i32 1098988457, i32 31) + %unsclear513 = and i32 %mul512, 2147483647 + %mul514 = call i32 @llvm.smul.fix.i32(i32 196608, i32 %unsclear513, i32 31) + %unsclear515 = and i32 %mul514, 2147483647 + %774 = load i32, i32* @v_548, align 1 + %mul516 = call i32 @llvm.smul.fix.i32(i32 %unsclear515, i32 %774, i32 31) + %unsclear517 = and i32 %mul516, 2147483647 + %775 = uitofp i32 %unsclear517 to double + %776 = fmul double %775, 0x3E00000000000000 + %sub518 = fsub double %773, %776 + store double %sub518, double* %v_549, align 1 + %777 = load 
i40**, i40*** @v_552, align 1 + %778 = load i40*, i40** %777, align 1 + %779 = load i40, i40* %778, align 1 + store i40 %779, i40* @v_430, align 1 + %780 = bitcast float* %v_557 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %780) #1 + %781 = load i32, i32* @v_228, align 1 + %rem519 = urem i32 %781, 6 + %rem519.c = trunc i32 %rem519 to i16 + %782 = add i16 0, %rem519.c + %arrayidx520 = getelementptr [6 x float**], [6 x float**]* @v_556, i32 0, i32 %rem519 + %783 = sub i16 6, %782 + %784 = icmp ult i16 6, %782 + %785 = icmp ult i16 %783, 1 + %786 = or i1 %784, %785 + br i1 %786, label %trap, label %787 + +787: ; preds = %if.end509 + %788 = load float**, float*** %arrayidx520, align 1 + %789 = load float*, float** %788, align 1 + %790 = load float, float* %789, align 1 + store float %790, float* %v_557, align 1 + store i16 -1022, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_40, i32 0, i32 0), align 1 + store i16 -14586, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_40, i32 0, i32 1), align 1 + %v_417.real = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_417, i32 0, i32 0), align 1 + %v_417.imag = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_417, i32 0, i32 1), align 1 + store i16 %v_417.real, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_372, i32 0, i32 0), align 1 + store i16 %v_417.imag, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_372, i32 0, i32 1), align 1 + %v_541.real = load i40, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_541, i32 0, i32 0), align 1 + %v_541.imag = load i40, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_541, i32 0, i32 1), align 1 + store i40 %v_541.real, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_558, i32 0, i32 0), align 1 + store i40 %v_541.imag, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_558, i32 0, i32 1), align 1 + %791 = load float, float* @v_473, align 1 + 
store float %791, float* @v_214, align 1 + %792 = bitcast { i40, i40 }* %v_559 to i8* + call void @llvm.lifetime.start.p0i8(i64 6, i8* %792) #1 + %v_541.real521 = load i40, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_541, i32 0, i32 0), align 1 + %v_541.imag522 = load i40, i40* getelementptr inbounds ({ i40, i40 }, { i40, i40 }* @v_541, i32 0, i32 1), align 1 + %v_559.realp = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_559, i32 0, i32 0 + %v_559.imagp = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_559, i32 0, i32 1 + store i40 %v_541.real521, i40* %v_559.realp, align 1 + store i40 %v_541.imag522, i40* %v_559.imagp, align 1 + %793 = load { i64, i64 }*, { i64, i64 }** @v_463, align 1 + %.realp523 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %793, i32 0, i32 0 + %.real524 = load i64, i64* %.realp523, align 1 + %.imagp525 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %793, i32 0, i32 1 + %.imag526 = load i64, i64* %.imagp525, align 1 + store i64 %.real524, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_58, i32 0, i32 0), align 1 + store i64 %.imag526, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_58, i32 0, i32 1), align 1 + %794 = bitcast { i16, i16 }* %v_560 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %794) #1 + %v_40.real = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_40, i32 0, i32 0), align 1 + %v_40.imag = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_40, i32 0, i32 1), align 1 + %v_560.realp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_560, i32 0, i32 0 + %v_560.imagp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_560, i32 0, i32 1 + store i16 %v_40.real, i16* %v_560.realp, align 1 + store i16 %v_40.imag, i16* %v_560.imagp, align 1 + %795 = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_561, i32 0, i32 1), align 1 + store i16 %795, i16* @v_531, align 1 + %796 = bitcast i16* %v_562 to 
i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %796) #1 + store i16 66, i16* %v_562, align 1 + %797 = bitcast { i16, i16 }* %v_572 to i8* + %v_572.realp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_572, i32 0, i32 0 + %v_572.imagp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_572, i32 0, i32 1 + %798 = bitcast i16* %v_575 to i8* + %799 = bitcast i64* %v_loop_2541 to i8* + %tmp572.realp = getelementptr inbounds { double, double }, { double, double }* %tmp572, i32 0, i32 0 + %tmp572.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp572, i32 0, i32 1 + %tmp585.realp = getelementptr inbounds { double, double }, { double, double }* %tmp585, i32 0, i32 0 + %tmp585.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp585, i32 0, i32 1 + %tmp599.realp = getelementptr inbounds { double, double }, { double, double }* %tmp599, i32 0, i32 0 + %tmp599.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp599, i32 0, i32 1 + %tmp621.realp = getelementptr inbounds { double, double }, { double, double }* %tmp621, i32 0, i32 0 + %tmp621.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp621, i32 0, i32 1 + %tmp649.realp = getelementptr inbounds { double, double }, { double, double }* %tmp649, i32 0, i32 0 + %tmp649.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp649, i32 0, i32 1 + %tmp671.realp = getelementptr inbounds { double, double }, { double, double }* %tmp671, i32 0, i32 0 + %tmp671.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp671, i32 0, i32 1 + %tmp703.realp = getelementptr inbounds { double, double }, { double, double }* %tmp703, i32 0, i32 0 + %tmp703.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp703, i32 0, i32 1 + %tmp717.realp = getelementptr inbounds { double, double }, { double, double }* %tmp717, i32 0, i32 0 + %tmp717.imagp = getelementptr inbounds { double, 
double }, { double, double }* %tmp717, i32 0, i32 1 + %tmp737.realp = getelementptr inbounds { double, double }, { double, double }* %tmp737, i32 0, i32 0 + %tmp737.imagp = getelementptr inbounds { double, double }, { double, double }* %tmp737, i32 0, i32 1 + %800 = bitcast i64* %v_loop_2541 to i8* + %801 = bitcast i24* %v_586 to i8* + %802 = bitcast i24* %v_586 to i8* + %803 = bitcast i16* %v_575 to i8* + %804 = bitcast { i16, i16 }* %v_572 to i8* + %v_562.promoted = load i16, i16* %v_562, align 1 + br label %for.body531 + +for.body531: ; preds = %for.inc761, %787 + %inc7629092 = phi i16 [ %v_562.promoted, %787 ], [ %inc762, %for.inc761 ] + store i32 727094605, i32* @v_425, align 1 + %v_378.real = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_378, i32 0, i32 0), align 1 + %v_378.imag = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_378, i32 0, i32 1), align 1 + store i16 %v_378.real, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_334, i32 0, i32 0), align 1 + store i16 %v_378.imag, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_334, i32 0, i32 1), align 1 + %805 = load { i16, i16 }*, { i16, i16 }** @v_564, align 1 + %.realp532 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %805, i32 0, i32 0 + %.real533 = load i16, i16* %.realp532, align 1 + %.imagp534 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %805, i32 0, i32 1 + %.imag535 = load i16, i16* %.imagp534, align 1 + store i16 %.real533, i16* getelementptr inbounds ([1 x { i16, i16 }], [1 x { i16, i16 }]* @v_566, i16 0, i16 0, i32 0), align 1 + store i16 %.imag535, i16* getelementptr inbounds ([1 x { i16, i16 }], [1 x { i16, i16 }]* @v_566, i16 0, i16 0, i32 1), align 1 + %v_500.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @v_500, i32 0, i32 0), align 1 + %v_500.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @v_500, i32 0, i32 1), align 1 + 
%806 = load { double, double }****, { double, double }***** @v_571, align 1 + %807 = load { double, double }***, { double, double }**** %806, align 1 + %808 = load { double, double }**, { double, double }*** %807, align 1 + %809 = load { double, double }*, { double, double }** %808, align 1 + %.realp538 = getelementptr inbounds { double, double }, { double, double }* %809, i32 0, i32 0 + %.imagp539 = getelementptr inbounds { double, double }, { double, double }* %809, i32 0, i32 1 + store double %v_500.real, double* %.realp538, align 1 + store double %v_500.imag, double* %.imagp539, align 1 + call void @llvm.lifetime.start.p0i8(i64 2, i8* %797) #1 + store i16 20724, i16* %v_572.realp, align 1 + store i16 0, i16* %v_572.imagp, align 1 + call void @llvm.lifetime.start.p0i8(i64 1, i8* %798) #1 + %810 = load { i16, i16 }*, { i16, i16 }** @v_574, align 1 + %.imagp540 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %810, i32 0, i32 1 + %811 = load volatile i16, i16* %.imagp540, align 1 + store volatile i16 %811, i16* %v_575, align 1 + %812 = load i32, i32* @v_245, align 1 + store i32 %812, i32* @v_16, align 1 + call void @llvm.lifetime.start.p0i8(i64 4, i8* %799) #1 + store i64 0, i64* %v_loop_2541, align 1 + br label %for.body546 + +for.body546: ; preds = %for.inc751, %for.body531 + %813 = load i64, i64* %v_loop_2541, align 1 + %idxprom547 = trunc i64 %813 to i32 + %arrayidx548.idx = mul i32 %idxprom547, 8 + %arrayidx548 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom547 + %arrayidx548.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx548, i32 0, i32 0 + %814 = sub i32 16, %arrayidx548.idx + %815 = icmp ult i32 16, %arrayidx548.idx + %816 = icmp ult i32 %814, 4 + %817 = or i1 %815, %816 + br i1 %817, label %trap.loopexit70, label %818 + +818: ; preds = %for.body546 + %arrayidx548.real = load double, double* %arrayidx548.realp, align 1 + %819 = add i32 %arrayidx548.idx, 4 + 
%arrayidx548.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx548, i32 0, i32 1 + %820 = sub i32 16, %819 + %821 = icmp ult i32 16, %819 + %822 = icmp ult i32 %820, 4 + %823 = or i1 %821, %822 + br i1 %823, label %trap.loopexit70, label %824 + +824: ; preds = %818 + %arrayidx548.imag = load double, double* %arrayidx548.imagp, align 1 + %825 = load i64, i64* %v_loop_2541, align 1 + %idxprom549 = trunc i64 %825 to i32 + %arrayidx550.idx = mul i32 %idxprom549, 8 + %arrayidx550 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom549 + %arrayidx550.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx550, i32 0, i32 0 + %826 = sub i32 16, %arrayidx550.idx + %827 = icmp ult i32 16, %arrayidx550.idx + %828 = icmp ult i32 %826, 4 + %829 = or i1 %827, %828 + br i1 %829, label %trap.loopexit70, label %830 + +830: ; preds = %824 + %arrayidx550.real = load double, double* %arrayidx550.realp, align 1 + %831 = add i32 %arrayidx550.idx, 4 + %arrayidx550.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx550, i32 0, i32 1 + %832 = sub i32 16, %831 + %833 = icmp ult i32 16, %831 + %834 = icmp ult i32 %832, 4 + %835 = or i1 %833, %834 + br i1 %835, label %trap.loopexit70, label %836 + +836: ; preds = %830 + %arrayidx550.imag = load double, double* %arrayidx550.imagp, align 1 + %sub.r = fsub double %arrayidx548.real, %arrayidx550.real + %sub.i = fsub double %arrayidx548.imag, %arrayidx550.imag + %837 = load i64, i64* %v_loop_2541, align 1 + %idxprom551 = trunc i64 %837 to i32 + %arrayidx552.idx = mul i32 %idxprom551, 8 + %arrayidx552 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom551 + %arrayidx552.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx552, i32 0, i32 0 + %838 = add i32 %arrayidx552.idx, 4 + %arrayidx552.imagp = getelementptr inbounds { double, double }, { double, 
double }* %arrayidx552, i32 0, i32 1 + %839 = sub i32 16, %arrayidx552.idx + %840 = icmp ult i32 16, %arrayidx552.idx + %841 = icmp ult i32 %839, 4 + %842 = or i1 %840, %841 + br i1 %842, label %trap.loopexit70, label %843 + +843: ; preds = %836 + store double %sub.r, double* %arrayidx552.realp, align 1 + %844 = sub i32 16, %838 + %845 = icmp ult i32 16, %838 + %846 = icmp ult i32 %844, 4 + %847 = or i1 %845, %846 + br i1 %847, label %trap.loopexit70, label %848 + +848: ; preds = %843 + store double %sub.i, double* %arrayidx552.imagp, align 1 + %849 = load i64, i64* %v_loop_2541, align 1 + %idxprom553 = trunc i64 %849 to i32 + %arrayidx554.idx = mul i32 %idxprom553, 8 + %arrayidx554 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom553 + %arrayidx554.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx554, i32 0, i32 0 + %850 = sub i32 16, %arrayidx554.idx + %851 = icmp ult i32 16, %arrayidx554.idx + %852 = icmp ult i32 %850, 4 + %853 = or i1 %851, %852 + br i1 %853, label %trap.loopexit70, label %854 + +854: ; preds = %848 + %arrayidx554.real = load double, double* %arrayidx554.realp, align 1 + %855 = add i32 %arrayidx554.idx, 4 + %arrayidx554.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx554, i32 0, i32 1 + %856 = sub i32 16, %855 + %857 = icmp ult i32 16, %855 + %858 = icmp ult i32 %856, 4 + %859 = or i1 %857, %858 + br i1 %859, label %trap.loopexit70, label %860 + +860: ; preds = %854 + %arrayidx554.imag = load double, double* %arrayidx554.imagp, align 1 + %861 = load i64, i64* %v_loop_2541, align 1 + %idxprom555 = trunc i64 %861 to i32 + %arrayidx556.idx = mul i32 %idxprom555, 8 + %arrayidx556 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom555 + %arrayidx556.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx556, i32 0, i32 0 + %862 = sub i32 16, %arrayidx556.idx + %863 = icmp 
ult i32 16, %arrayidx556.idx + %864 = icmp ult i32 %862, 4 + %865 = or i1 %863, %864 + br i1 %865, label %trap.loopexit70, label %866 + +866: ; preds = %860 + %arrayidx556.real = load double, double* %arrayidx556.realp, align 1 + %867 = add i32 %arrayidx556.idx, 4 + %arrayidx556.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx556, i32 0, i32 1 + %868 = sub i32 16, %867 + %869 = icmp ult i32 16, %867 + %870 = icmp ult i32 %868, 4 + %871 = or i1 %869, %870 + br i1 %871, label %trap.loopexit70, label %872 + +872: ; preds = %866 + %arrayidx556.imag = load double, double* %arrayidx556.imagp, align 1 + %sub.r557 = fsub double %arrayidx554.real, %arrayidx556.real + %sub.i558 = fsub double %arrayidx554.imag, %arrayidx556.imag + %873 = load i64, i64* %v_loop_2541, align 1 + %idxprom559 = trunc i64 %873 to i32 + %arrayidx560.idx = mul i32 %idxprom559, 8 + %arrayidx560 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom559 + %arrayidx560.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx560, i32 0, i32 0 + %874 = add i32 %arrayidx560.idx, 4 + %arrayidx560.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx560, i32 0, i32 1 + %875 = sub i32 16, %arrayidx560.idx + %876 = icmp ult i32 16, %arrayidx560.idx + %877 = icmp ult i32 %875, 4 + %878 = or i1 %876, %877 + br i1 %878, label %trap.loopexit70, label %879 + +879: ; preds = %872 + store double %sub.r557, double* %arrayidx560.realp, align 1 + %880 = sub i32 16, %874 + %881 = icmp ult i32 16, %874 + %882 = icmp ult i32 %880, 4 + %883 = or i1 %881, %882 + br i1 %883, label %trap.loopexit70, label %884 + +884: ; preds = %879 + store double %sub.i558, double* %arrayidx560.imagp, align 1 + %885 = load i64, i64* %v_loop_2541, align 1 + %idxprom561 = trunc i64 %885 to i32 + %arrayidx562.idx = mul i32 %idxprom561, 8 + %arrayidx562 = getelementptr [2 x { double, double }], [2 x { double, double }]* 
@v_577, i32 0, i32 %idxprom561 + %arrayidx562.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx562, i32 0, i32 0 + %886 = sub i32 16, %arrayidx562.idx + %887 = icmp ult i32 16, %arrayidx562.idx + %888 = icmp ult i32 %886, 4 + %889 = or i1 %887, %888 + br i1 %889, label %trap.loopexit70, label %890 + +890: ; preds = %884 + %arrayidx562.real = load double, double* %arrayidx562.realp, align 1 + %891 = add i32 %arrayidx562.idx, 4 + %arrayidx562.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx562, i32 0, i32 1 + %892 = sub i32 16, %891 + %893 = icmp ult i32 16, %891 + %894 = icmp ult i32 %892, 4 + %895 = or i1 %893, %894 + br i1 %895, label %trap.loopexit70, label %896 + +896: ; preds = %890 + %arrayidx562.imag = load double, double* %arrayidx562.imagp, align 1 + %897 = load i64, i64* %v_loop_2541, align 1 + %idxprom563 = trunc i64 %897 to i32 + %arrayidx564.idx = mul i32 %idxprom563, 8 + %arrayidx564 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom563 + %arrayidx564.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx564, i32 0, i32 0 + %898 = sub i32 16, %arrayidx564.idx + %899 = icmp ult i32 16, %arrayidx564.idx + %900 = icmp ult i32 %898, 4 + %901 = or i1 %899, %900 + br i1 %901, label %trap.loopexit70, label %902 + +902: ; preds = %896 + %arrayidx564.real = load double, double* %arrayidx564.realp, align 1 + %903 = add i32 %arrayidx564.idx, 4 + %arrayidx564.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx564, i32 0, i32 1 + %904 = sub i32 16, %903 + %905 = icmp ult i32 16, %903 + %906 = icmp ult i32 %904, 4 + %907 = or i1 %905, %906 + br i1 %907, label %trap.loopexit70, label %908 + +908: ; preds = %902 + %arrayidx564.imag = load double, double* %arrayidx564.imagp, align 1 + %909 = load i64, i64* %v_loop_2541, align 1 + %idxprom565 = trunc i64 %909 to i32 + %arrayidx566.idx = mul i32 %idxprom565, 8 
+ %arrayidx566 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom565 + %arrayidx566.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx566, i32 0, i32 0 + %910 = sub i32 16, %arrayidx566.idx + %911 = icmp ult i32 16, %arrayidx566.idx + %912 = icmp ult i32 %910, 4 + %913 = or i1 %911, %912 + br i1 %913, label %trap.loopexit70, label %914 + +914: ; preds = %908 + %arrayidx566.real = load double, double* %arrayidx566.realp, align 1 + %915 = add i32 %arrayidx566.idx, 4 + %arrayidx566.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx566, i32 0, i32 1 + %916 = sub i32 16, %915 + %917 = icmp ult i32 16, %915 + %918 = icmp ult i32 %916, 4 + %919 = or i1 %917, %918 + br i1 %919, label %trap.loopexit70, label %920 + +920: ; preds = %914 + %arrayidx566.imag = load double, double* %arrayidx566.imagp, align 1 + %921 = load i64, i64* %v_loop_2541, align 1 + %idxprom567 = trunc i64 %921 to i32 + %arrayidx568.idx = mul i32 %idxprom567, 8 + %arrayidx568 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom567 + %arrayidx568.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx568, i32 0, i32 0 + %922 = sub i32 16, %arrayidx568.idx + %923 = icmp ult i32 16, %arrayidx568.idx + %924 = icmp ult i32 %922, 4 + %925 = or i1 %923, %924 + br i1 %925, label %trap.loopexit70, label %926 + +926: ; preds = %920 + %arrayidx568.real = load double, double* %arrayidx568.realp, align 1 + %927 = add i32 %arrayidx568.idx, 4 + %arrayidx568.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx568, i32 0, i32 1 + %928 = sub i32 16, %927 + %929 = icmp ult i32 16, %927 + %930 = icmp ult i32 %928, 4 + %931 = or i1 %929, %930 + br i1 %931, label %trap.loopexit70, label %932 + +932: ; preds = %926 + %arrayidx568.imag = load double, double* %arrayidx568.imagp, align 1 + %933 = load i64, i64* %v_loop_2541, align 1 
+ %idxprom569 = trunc i64 %933 to i32 + %arrayidx570.idx = mul i32 %idxprom569, 8 + %arrayidx570 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom569 + %arrayidx570.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx570, i32 0, i32 0 + %934 = sub i32 16, %arrayidx570.idx + %935 = icmp ult i32 16, %arrayidx570.idx + %936 = icmp ult i32 %934, 4 + %937 = or i1 %935, %936 + br i1 %937, label %trap.loopexit70, label %938 + +938: ; preds = %932 + %arrayidx570.real = load double, double* %arrayidx570.realp, align 1 + %939 = add i32 %arrayidx570.idx, 4 + %arrayidx570.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx570, i32 0, i32 1 + %940 = sub i32 16, %939 + %941 = icmp ult i32 16, %939 + %942 = icmp ult i32 %940, 4 + %943 = or i1 %941, %942 + br i1 %943, label %trap.loopexit70, label %944 + +944: ; preds = %938 + %arrayidx570.imag = load double, double* %arrayidx570.imagp, align 1 + %mul_ac = fmul double %arrayidx568.real, %arrayidx570.real + %mul_bd = fmul double %arrayidx568.imag, %arrayidx570.imag + %mul_ad = fmul double %arrayidx568.real, %arrayidx570.imag + %mul_bc = fmul double %arrayidx568.imag, %arrayidx570.real + %mul_r = fsub double %mul_ac, %mul_bd + %mul_i = fadd double %mul_ad, %mul_bc + %isnan_cmp = fcmp uno double %mul_r, %mul_r + br i1 %isnan_cmp, label %complex_mul_imag_nan, label %complex_mul_cont + +complex_mul_imag_nan: ; preds = %944 + %isnan_cmp571 = fcmp uno double %mul_i, %mul_i + br i1 %isnan_cmp571, label %complex_mul_libcall, label %complex_mul_cont + +complex_mul_libcall: ; preds = %complex_mul_imag_nan + call void @__muldc3({ double, double }* align 1 %tmp572, double %arrayidx568.real, double %arrayidx568.imag, double %arrayidx570.real, double %arrayidx570.imag) #1 + %tmp572.real = load double, double* %tmp572.realp, align 1 + %tmp572.imag = load double, double* %tmp572.imagp, align 1 + br label %complex_mul_cont + +complex_mul_cont: ; 
preds = %complex_mul_libcall, %complex_mul_imag_nan, %944 + %real_mul_phi = phi double [ %mul_r, %944 ], [ %mul_r, %complex_mul_imag_nan ], [ %tmp572.real, %complex_mul_libcall ] + %imag_mul_phi = phi double [ %mul_i, %944 ], [ %mul_i, %complex_mul_imag_nan ], [ %tmp572.imag, %complex_mul_libcall ] + %sub.r573 = fsub double %arrayidx566.real, %real_mul_phi + %sub.i574 = fsub double %arrayidx566.imag, %imag_mul_phi + %mul_ac575 = fmul double %arrayidx564.real, %sub.r573 + %mul_bd576 = fmul double %arrayidx564.imag, %sub.i574 + %mul_ad577 = fmul double %arrayidx564.real, %sub.i574 + %mul_bc578 = fmul double %arrayidx564.imag, %sub.r573 + %mul_r579 = fsub double %mul_ac575, %mul_bd576 + %mul_i580 = fadd double %mul_ad577, %mul_bc578 + %isnan_cmp581 = fcmp uno double %mul_r579, %mul_r579 + br i1 %isnan_cmp581, label %complex_mul_imag_nan582, label %complex_mul_cont586 + +complex_mul_imag_nan582: ; preds = %complex_mul_cont + %isnan_cmp583 = fcmp uno double %mul_i580, %mul_i580 + br i1 %isnan_cmp583, label %complex_mul_libcall584, label %complex_mul_cont586 + +complex_mul_libcall584: ; preds = %complex_mul_imag_nan582 + call void @__muldc3({ double, double }* align 1 %tmp585, double %arrayidx564.real, double %arrayidx564.imag, double %sub.r573, double %sub.i574) #1 + %tmp585.real = load double, double* %tmp585.realp, align 1 + %tmp585.imag = load double, double* %tmp585.imagp, align 1 + br label %complex_mul_cont586 + +complex_mul_cont586: ; preds = %complex_mul_libcall584, %complex_mul_imag_nan582, %complex_mul_cont + %real_mul_phi587 = phi double [ %mul_r579, %complex_mul_cont ], [ %mul_r579, %complex_mul_imag_nan582 ], [ %tmp585.real, %complex_mul_libcall584 ] + %imag_mul_phi588 = phi double [ %mul_i580, %complex_mul_cont ], [ %mul_i580, %complex_mul_imag_nan582 ], [ %tmp585.imag, %complex_mul_libcall584 ] + %mul_ac589 = fmul double %arrayidx562.real, %real_mul_phi587 + %mul_bd590 = fmul double %arrayidx562.imag, %imag_mul_phi588 + %mul_ad591 = fmul double 
%arrayidx562.real, %imag_mul_phi588 + %mul_bc592 = fmul double %arrayidx562.imag, %real_mul_phi587 + %mul_r593 = fsub double %mul_ac589, %mul_bd590 + %mul_i594 = fadd double %mul_ad591, %mul_bc592 + %isnan_cmp595 = fcmp uno double %mul_r593, %mul_r593 + br i1 %isnan_cmp595, label %complex_mul_imag_nan596, label %complex_mul_cont600 + +complex_mul_imag_nan596: ; preds = %complex_mul_cont586 + %isnan_cmp597 = fcmp uno double %mul_i594, %mul_i594 + br i1 %isnan_cmp597, label %complex_mul_libcall598, label %complex_mul_cont600 + +complex_mul_libcall598: ; preds = %complex_mul_imag_nan596 + call void @__muldc3({ double, double }* align 1 %tmp599, double %arrayidx562.real, double %arrayidx562.imag, double %real_mul_phi587, double %imag_mul_phi588) #1 + %tmp599.real = load double, double* %tmp599.realp, align 1 + %tmp599.imag = load double, double* %tmp599.imagp, align 1 + br label %complex_mul_cont600 + +complex_mul_cont600: ; preds = %complex_mul_libcall598, %complex_mul_imag_nan596, %complex_mul_cont586 + %real_mul_phi601 = phi double [ %mul_r593, %complex_mul_cont586 ], [ %mul_r593, %complex_mul_imag_nan596 ], [ %tmp599.real, %complex_mul_libcall598 ] + %imag_mul_phi602 = phi double [ %mul_i594, %complex_mul_cont586 ], [ %mul_i594, %complex_mul_imag_nan596 ], [ %tmp599.imag, %complex_mul_libcall598 ] + %945 = load i64, i64* %v_loop_2541, align 1 + %idxprom603 = trunc i64 %945 to i32 + %arrayidx604.idx = mul i32 %idxprom603, 8 + %arrayidx604 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom603 + %arrayidx604.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx604, i32 0, i32 0 + %946 = sub i32 16, %arrayidx604.idx + %947 = icmp ult i32 16, %arrayidx604.idx + %948 = icmp ult i32 %946, 4 + %949 = or i1 %947, %948 + br i1 %949, label %trap.loopexit70, label %950 + +950: ; preds = %complex_mul_cont600 + %arrayidx604.real = load double, double* %arrayidx604.realp, align 1 + %951 = add i32 
%arrayidx604.idx, 4 + %arrayidx604.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx604, i32 0, i32 1 + %952 = sub i32 16, %951 + %953 = icmp ult i32 16, %951 + %954 = icmp ult i32 %952, 4 + %955 = or i1 %953, %954 + br i1 %955, label %trap.loopexit70, label %956 + +956: ; preds = %950 + %arrayidx604.imag = load double, double* %arrayidx604.imagp, align 1 + %add.r = fadd double %real_mul_phi601, %arrayidx604.real + %add.i = fadd double %imag_mul_phi602, %arrayidx604.imag + %957 = load i64, i64* %v_loop_2541, align 1 + %idxprom605 = trunc i64 %957 to i32 + %arrayidx606.idx = mul i32 %idxprom605, 8 + %arrayidx606 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom605 + %arrayidx606.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx606, i32 0, i32 0 + %958 = add i32 %arrayidx606.idx, 4 + %arrayidx606.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx606, i32 0, i32 1 + %959 = sub i32 16, %arrayidx606.idx + %960 = icmp ult i32 16, %arrayidx606.idx + %961 = icmp ult i32 %959, 4 + %962 = or i1 %960, %961 + br i1 %962, label %trap.loopexit70, label %963 + +963: ; preds = %956 + store double %add.r, double* %arrayidx606.realp, align 1 + %964 = sub i32 16, %958 + %965 = icmp ult i32 16, %958 + %966 = icmp ult i32 %964, 4 + %967 = or i1 %965, %966 + br i1 %967, label %trap.loopexit70, label %968 + +968: ; preds = %963 + store double %add.i, double* %arrayidx606.imagp, align 1 + %969 = load i64, i64* %v_loop_2541, align 1 + %idxprom607 = trunc i64 %969 to i32 + %arrayidx608.idx = mul i32 %idxprom607, 8 + %arrayidx608 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom607 + %arrayidx608.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx608, i32 0, i32 0 + %970 = sub i32 16, %arrayidx608.idx + %971 = icmp ult i32 16, %arrayidx608.idx + %972 = icmp ult i32 %970, 4 + %973 = 
or i1 %971, %972 + br i1 %973, label %trap.loopexit70, label %974 + +974: ; preds = %968 + %arrayidx608.real = load double, double* %arrayidx608.realp, align 1 + %975 = add i32 %arrayidx608.idx, 4 + %arrayidx608.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx608, i32 0, i32 1 + %976 = sub i32 16, %975 + %977 = icmp ult i32 16, %975 + %978 = icmp ult i32 %976, 4 + %979 = or i1 %977, %978 + br i1 %979, label %trap.loopexit70, label %980 + +980: ; preds = %974 + %arrayidx608.imag = load double, double* %arrayidx608.imagp, align 1 + %981 = load i64, i64* %v_loop_2541, align 1 + %idxprom609 = trunc i64 %981 to i32 + %arrayidx610.idx = mul i32 %idxprom609, 8 + %arrayidx610 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom609 + %arrayidx610.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx610, i32 0, i32 0 + %982 = sub i32 16, %arrayidx610.idx + %983 = icmp ult i32 16, %arrayidx610.idx + %984 = icmp ult i32 %982, 4 + %985 = or i1 %983, %984 + br i1 %985, label %trap.loopexit70, label %986 + +986: ; preds = %980 + %arrayidx610.real = load double, double* %arrayidx610.realp, align 1 + %987 = add i32 %arrayidx610.idx, 4 + %arrayidx610.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx610, i32 0, i32 1 + %988 = sub i32 16, %987 + %989 = icmp ult i32 16, %987 + %990 = icmp ult i32 %988, 4 + %991 = or i1 %989, %990 + br i1 %991, label %trap.loopexit70, label %992 + +992: ; preds = %986 + %arrayidx610.imag = load double, double* %arrayidx610.imagp, align 1 + %mul_ac611 = fmul double %arrayidx608.real, %arrayidx610.real + %mul_bd612 = fmul double %arrayidx608.imag, %arrayidx610.imag + %mul_ad613 = fmul double %arrayidx608.real, %arrayidx610.imag + %mul_bc614 = fmul double %arrayidx608.imag, %arrayidx610.real + %mul_r615 = fsub double %mul_ac611, %mul_bd612 + %mul_i616 = fadd double %mul_ad613, %mul_bc614 + %isnan_cmp617 = fcmp uno double 
%mul_r615, %mul_r615 + br i1 %isnan_cmp617, label %complex_mul_imag_nan618, label %complex_mul_cont622 + +complex_mul_imag_nan618: ; preds = %992 + %isnan_cmp619 = fcmp uno double %mul_i616, %mul_i616 + br i1 %isnan_cmp619, label %complex_mul_libcall620, label %complex_mul_cont622 + +complex_mul_libcall620: ; preds = %complex_mul_imag_nan618 + call void @__muldc3({ double, double }* align 1 %tmp621, double %arrayidx608.real, double %arrayidx608.imag, double %arrayidx610.real, double %arrayidx610.imag) #1 + %tmp621.real = load double, double* %tmp621.realp, align 1 + %tmp621.imag = load double, double* %tmp621.imagp, align 1 + br label %complex_mul_cont622 + +complex_mul_cont622: ; preds = %complex_mul_libcall620, %complex_mul_imag_nan618, %992 + %real_mul_phi623 = phi double [ %mul_r615, %992 ], [ %mul_r615, %complex_mul_imag_nan618 ], [ %tmp621.real, %complex_mul_libcall620 ] + %imag_mul_phi624 = phi double [ %mul_i616, %992 ], [ %mul_i616, %complex_mul_imag_nan618 ], [ %tmp621.imag, %complex_mul_libcall620 ] + %993 = load i64, i64* %v_loop_2541, align 1 + %idxprom625 = trunc i64 %993 to i32 + %arrayidx626.idx = mul i32 %idxprom625, 8 + %arrayidx626 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom625 + %arrayidx626.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx626, i32 0, i32 0 + %994 = add i32 %arrayidx626.idx, 4 + %arrayidx626.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx626, i32 0, i32 1 + %995 = sub i32 16, %arrayidx626.idx + %996 = icmp ult i32 16, %arrayidx626.idx + %997 = icmp ult i32 %995, 4 + %998 = or i1 %996, %997 + br i1 %998, label %trap.loopexit70, label %999 + +999: ; preds = %complex_mul_cont622 + store double %real_mul_phi623, double* %arrayidx626.realp, align 1 + %1000 = sub i32 16, %994 + %1001 = icmp ult i32 16, %994 + %1002 = icmp ult i32 %1000, 4 + %1003 = or i1 %1001, %1002 + br i1 %1003, label %trap.loopexit70, label %1004 + 
+1004: ; preds = %999 + store double %imag_mul_phi624, double* %arrayidx626.imagp, align 1 + %1005 = load i64, i64* %v_loop_2541, align 1 + %idxprom627 = trunc i64 %1005 to i32 + %arrayidx628.idx = mul i32 %idxprom627, 8 + %arrayidx628 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom627 + %arrayidx628.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx628, i32 0, i32 0 + %1006 = sub i32 16, %arrayidx628.idx + %1007 = icmp ult i32 16, %arrayidx628.idx + %1008 = icmp ult i32 %1006, 4 + %1009 = or i1 %1007, %1008 + br i1 %1009, label %trap.loopexit70, label %1010 + +1010: ; preds = %1004 + %arrayidx628.real = load double, double* %arrayidx628.realp, align 1 + %1011 = add i32 %arrayidx628.idx, 4 + %arrayidx628.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx628, i32 0, i32 1 + %1012 = sub i32 16, %1011 + %1013 = icmp ult i32 16, %1011 + %1014 = icmp ult i32 %1012, 4 + %1015 = or i1 %1013, %1014 + br i1 %1015, label %trap.loopexit70, label %1016 + +1016: ; preds = %1010 + %arrayidx628.imag = load double, double* %arrayidx628.imagp, align 1 + %1017 = load i64, i64* %v_loop_2541, align 1 + %idxprom629 = trunc i64 %1017 to i32 + %arrayidx630.idx = mul i32 %idxprom629, 8 + %arrayidx630 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom629 + %arrayidx630.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx630, i32 0, i32 0 + %1018 = sub i32 16, %arrayidx630.idx + %1019 = icmp ult i32 16, %arrayidx630.idx + %1020 = icmp ult i32 %1018, 4 + %1021 = or i1 %1019, %1020 + br i1 %1021, label %trap.loopexit70, label %1022 + +1022: ; preds = %1016 + %arrayidx630.real = load double, double* %arrayidx630.realp, align 1 + %1023 = add i32 %arrayidx630.idx, 4 + %arrayidx630.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx630, i32 0, i32 1 + %1024 = sub i32 16, %1023 + %1025 = 
icmp ult i32 16, %1023 + %1026 = icmp ult i32 %1024, 4 + %1027 = or i1 %1025, %1026 + br i1 %1027, label %trap.loopexit70, label %1028 + +1028: ; preds = %1022 + %arrayidx630.imag = load double, double* %arrayidx630.imagp, align 1 + %add.r631 = fadd double %arrayidx628.real, %arrayidx630.real + %add.i632 = fadd double %arrayidx628.imag, %arrayidx630.imag + %1029 = load i64, i64* %v_loop_2541, align 1 + %idxprom633 = trunc i64 %1029 to i32 + %arrayidx634.idx = mul i32 %idxprom633, 8 + %arrayidx634 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom633 + %arrayidx634.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx634, i32 0, i32 0 + %1030 = add i32 %arrayidx634.idx, 4 + %arrayidx634.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx634, i32 0, i32 1 + %1031 = sub i32 16, %arrayidx634.idx + %1032 = icmp ult i32 16, %arrayidx634.idx + %1033 = icmp ult i32 %1031, 4 + %1034 = or i1 %1032, %1033 + br i1 %1034, label %trap.loopexit70, label %1035 + +1035: ; preds = %1028 + store double %add.r631, double* %arrayidx634.realp, align 1 + %1036 = sub i32 16, %1030 + %1037 = icmp ult i32 16, %1030 + %1038 = icmp ult i32 %1036, 4 + %1039 = or i1 %1037, %1038 + br i1 %1039, label %trap.loopexit70, label %1040 + +1040: ; preds = %1035 + store double %add.i632, double* %arrayidx634.imagp, align 1 + %1041 = load i64, i64* %v_loop_2541, align 1 + %idxprom635 = trunc i64 %1041 to i32 + %arrayidx636.idx = mul i32 %idxprom635, 8 + %arrayidx636 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom635 + %arrayidx636.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx636, i32 0, i32 0 + %1042 = sub i32 16, %arrayidx636.idx + %1043 = icmp ult i32 16, %arrayidx636.idx + %1044 = icmp ult i32 %1042, 4 + %1045 = or i1 %1043, %1044 + br i1 %1045, label %trap.loopexit70, label %1046 + +1046: ; preds = %1040 + 
%arrayidx636.real = load double, double* %arrayidx636.realp, align 1 + %1047 = add i32 %arrayidx636.idx, 4 + %arrayidx636.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx636, i32 0, i32 1 + %1048 = sub i32 16, %1047 + %1049 = icmp ult i32 16, %1047 + %1050 = icmp ult i32 %1048, 4 + %1051 = or i1 %1049, %1050 + br i1 %1051, label %trap.loopexit70, label %1052 + +1052: ; preds = %1046 + %arrayidx636.imag = load double, double* %arrayidx636.imagp, align 1 + %1053 = load i64, i64* %v_loop_2541, align 1 + %idxprom637 = trunc i64 %1053 to i32 + %arrayidx638.idx = mul i32 %idxprom637, 8 + %arrayidx638 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom637 + %arrayidx638.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx638, i32 0, i32 0 + %1054 = sub i32 16, %arrayidx638.idx + %1055 = icmp ult i32 16, %arrayidx638.idx + %1056 = icmp ult i32 %1054, 4 + %1057 = or i1 %1055, %1056 + br i1 %1057, label %trap.loopexit70, label %1058 + +1058: ; preds = %1052 + %arrayidx638.real = load double, double* %arrayidx638.realp, align 1 + %1059 = add i32 %arrayidx638.idx, 4 + %arrayidx638.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx638, i32 0, i32 1 + %1060 = sub i32 16, %1059 + %1061 = icmp ult i32 16, %1059 + %1062 = icmp ult i32 %1060, 4 + %1063 = or i1 %1061, %1062 + br i1 %1063, label %trap.loopexit70, label %1064 + +1064: ; preds = %1058 + %arrayidx638.imag = load double, double* %arrayidx638.imagp, align 1 + %mul_ac639 = fmul double %arrayidx636.real, %arrayidx638.real + %mul_bd640 = fmul double %arrayidx636.imag, %arrayidx638.imag + %mul_ad641 = fmul double %arrayidx636.real, %arrayidx638.imag + %mul_bc642 = fmul double %arrayidx636.imag, %arrayidx638.real + %mul_r643 = fsub double %mul_ac639, %mul_bd640 + %mul_i644 = fadd double %mul_ad641, %mul_bc642 + %isnan_cmp645 = fcmp uno double %mul_r643, %mul_r643 + br i1 %isnan_cmp645, label 
%complex_mul_imag_nan646, label %complex_mul_cont650 + +complex_mul_imag_nan646: ; preds = %1064 + %isnan_cmp647 = fcmp uno double %mul_i644, %mul_i644 + br i1 %isnan_cmp647, label %complex_mul_libcall648, label %complex_mul_cont650 + +complex_mul_libcall648: ; preds = %complex_mul_imag_nan646 + call void @__muldc3({ double, double }* align 1 %tmp649, double %arrayidx636.real, double %arrayidx636.imag, double %arrayidx638.real, double %arrayidx638.imag) #1 + %tmp649.real = load double, double* %tmp649.realp, align 1 + %tmp649.imag = load double, double* %tmp649.imagp, align 1 + br label %complex_mul_cont650 + +complex_mul_cont650: ; preds = %complex_mul_libcall648, %complex_mul_imag_nan646, %1064 + %real_mul_phi651 = phi double [ %mul_r643, %1064 ], [ %mul_r643, %complex_mul_imag_nan646 ], [ %tmp649.real, %complex_mul_libcall648 ] + %imag_mul_phi652 = phi double [ %mul_i644, %1064 ], [ %mul_i644, %complex_mul_imag_nan646 ], [ %tmp649.imag, %complex_mul_libcall648 ] + %1065 = load i64, i64* %v_loop_2541, align 1 + %idxprom653 = trunc i64 %1065 to i32 + %arrayidx654.idx = mul i32 %idxprom653, 8 + %arrayidx654 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom653 + %arrayidx654.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx654, i32 0, i32 0 + %1066 = add i32 %arrayidx654.idx, 4 + %arrayidx654.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx654, i32 0, i32 1 + %1067 = sub i32 16, %arrayidx654.idx + %1068 = icmp ult i32 16, %arrayidx654.idx + %1069 = icmp ult i32 %1067, 4 + %1070 = or i1 %1068, %1069 + br i1 %1070, label %trap.loopexit70, label %1071 + +1071: ; preds = %complex_mul_cont650 + store double %real_mul_phi651, double* %arrayidx654.realp, align 1 + %1072 = sub i32 16, %1066 + %1073 = icmp ult i32 16, %1066 + %1074 = icmp ult i32 %1072, 4 + %1075 = or i1 %1073, %1074 + br i1 %1075, label %trap.loopexit70, label %1076 + +1076: ; preds = %1071 + store 
double %imag_mul_phi652, double* %arrayidx654.imagp, align 1 + %1077 = load i64, i64* %v_loop_2541, align 1 + %idxprom655 = trunc i64 %1077 to i32 + %arrayidx656.idx = mul i32 %idxprom655, 8 + %arrayidx656 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom655 + %arrayidx656.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx656, i32 0, i32 0 + %1078 = sub i32 16, %arrayidx656.idx + %1079 = icmp ult i32 16, %arrayidx656.idx + %1080 = icmp ult i32 %1078, 4 + %1081 = or i1 %1079, %1080 + br i1 %1081, label %trap.loopexit70, label %1082 + +1082: ; preds = %1076 + %arrayidx656.real = load double, double* %arrayidx656.realp, align 1 + %1083 = add i32 %arrayidx656.idx, 4 + %arrayidx656.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx656, i32 0, i32 1 + %1084 = sub i32 16, %1083 + %1085 = icmp ult i32 16, %1083 + %1086 = icmp ult i32 %1084, 4 + %1087 = or i1 %1085, %1086 + br i1 %1087, label %trap.loopexit70, label %1088 + +1088: ; preds = %1082 + %arrayidx656.imag = load double, double* %arrayidx656.imagp, align 1 + %1089 = load i64, i64* %v_loop_2541, align 1 + %idxprom657 = trunc i64 %1089 to i32 + %arrayidx658.idx = mul i32 %idxprom657, 8 + %arrayidx658 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom657 + %arrayidx658.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx658, i32 0, i32 0 + %1090 = sub i32 16, %arrayidx658.idx + %1091 = icmp ult i32 16, %arrayidx658.idx + %1092 = icmp ult i32 %1090, 4 + %1093 = or i1 %1091, %1092 + br i1 %1093, label %trap.loopexit70, label %1094 + +1094: ; preds = %1088 + %arrayidx658.real = load double, double* %arrayidx658.realp, align 1 + %1095 = add i32 %arrayidx658.idx, 4 + %arrayidx658.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx658, i32 0, i32 1 + %1096 = sub i32 16, %1095 + %1097 = icmp ult i32 16, %1095 + %1098 = 
icmp ult i32 %1096, 4 + %1099 = or i1 %1097, %1098 + br i1 %1099, label %trap.loopexit70, label %1100 + +1100: ; preds = %1094 + %arrayidx658.imag = load double, double* %arrayidx658.imagp, align 1 + %1101 = load i64, i64* %v_loop_2541, align 1 + %idxprom659 = trunc i64 %1101 to i32 + %arrayidx660.idx = mul i32 %idxprom659, 8 + %arrayidx660 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom659 + %arrayidx660.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx660, i32 0, i32 0 + %1102 = sub i32 16, %arrayidx660.idx + %1103 = icmp ult i32 16, %arrayidx660.idx + %1104 = icmp ult i32 %1102, 4 + %1105 = or i1 %1103, %1104 + br i1 %1105, label %trap.loopexit70, label %1106 + +1106: ; preds = %1100 + %arrayidx660.real = load double, double* %arrayidx660.realp, align 1 + %1107 = add i32 %arrayidx660.idx, 4 + %arrayidx660.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx660, i32 0, i32 1 + %1108 = sub i32 16, %1107 + %1109 = icmp ult i32 16, %1107 + %1110 = icmp ult i32 %1108, 4 + %1111 = or i1 %1109, %1110 + br i1 %1111, label %trap.loopexit70, label %1112 + +1112: ; preds = %1106 + %arrayidx660.imag = load double, double* %arrayidx660.imagp, align 1 + %mul_ac661 = fmul double %arrayidx658.real, %arrayidx660.real + %mul_bd662 = fmul double %arrayidx658.imag, %arrayidx660.imag + %mul_ad663 = fmul double %arrayidx658.real, %arrayidx660.imag + %mul_bc664 = fmul double %arrayidx658.imag, %arrayidx660.real + %mul_r665 = fsub double %mul_ac661, %mul_bd662 + %mul_i666 = fadd double %mul_ad663, %mul_bc664 + %isnan_cmp667 = fcmp uno double %mul_r665, %mul_r665 + br i1 %isnan_cmp667, label %complex_mul_imag_nan668, label %complex_mul_cont672 + +complex_mul_imag_nan668: ; preds = %1112 + %isnan_cmp669 = fcmp uno double %mul_i666, %mul_i666 + br i1 %isnan_cmp669, label %complex_mul_libcall670, label %complex_mul_cont672 + +complex_mul_libcall670: ; preds = %complex_mul_imag_nan668 
+ call void @__muldc3({ double, double }* align 1 %tmp671, double %arrayidx658.real, double %arrayidx658.imag, double %arrayidx660.real, double %arrayidx660.imag) #1 + %tmp671.real = load double, double* %tmp671.realp, align 1 + %tmp671.imag = load double, double* %tmp671.imagp, align 1 + br label %complex_mul_cont672 + +complex_mul_cont672: ; preds = %complex_mul_libcall670, %complex_mul_imag_nan668, %1112 + %real_mul_phi673 = phi double [ %mul_r665, %1112 ], [ %mul_r665, %complex_mul_imag_nan668 ], [ %tmp671.real, %complex_mul_libcall670 ] + %imag_mul_phi674 = phi double [ %mul_i666, %1112 ], [ %mul_i666, %complex_mul_imag_nan668 ], [ %tmp671.imag, %complex_mul_libcall670 ] + %add.r675 = fadd double %arrayidx656.real, %real_mul_phi673 + %add.i676 = fadd double %arrayidx656.imag, %imag_mul_phi674 + %1113 = load i64, i64* %v_loop_2541, align 1 + %idxprom677 = trunc i64 %1113 to i32 + %arrayidx678.idx = mul i32 %idxprom677, 8 + %arrayidx678 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_583, i32 0, i32 %idxprom677 + %arrayidx678.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx678, i32 0, i32 0 + %1114 = add i32 %arrayidx678.idx, 4 + %arrayidx678.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx678, i32 0, i32 1 + %1115 = sub i32 16, %arrayidx678.idx + %1116 = icmp ult i32 16, %arrayidx678.idx + %1117 = icmp ult i32 %1115, 4 + %1118 = or i1 %1116, %1117 + br i1 %1118, label %trap.loopexit70, label %1119 + +1119: ; preds = %complex_mul_cont672 + store double %add.r675, double* %arrayidx678.realp, align 1 + %1120 = sub i32 16, %1114 + %1121 = icmp ult i32 16, %1114 + %1122 = icmp ult i32 %1120, 4 + %1123 = or i1 %1121, %1122 + br i1 %1123, label %trap.loopexit70, label %1124 + +1124: ; preds = %1119 + store double %add.i676, double* %arrayidx678.imagp, align 1 + %1125 = load i64, i64* %v_loop_2541, align 1 + %idxprom679 = trunc i64 %1125 to i32 + %arrayidx680.idx = mul i32 
%idxprom679, 8 + %arrayidx680 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom679 + %arrayidx680.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx680, i32 0, i32 0 + %1126 = sub i32 16, %arrayidx680.idx + %1127 = icmp ult i32 16, %arrayidx680.idx + %1128 = icmp ult i32 %1126, 4 + %1129 = or i1 %1127, %1128 + br i1 %1129, label %trap.loopexit70, label %1130 + +1130: ; preds = %1124 + %arrayidx680.real = load double, double* %arrayidx680.realp, align 1 + %1131 = add i32 %arrayidx680.idx, 4 + %arrayidx680.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx680, i32 0, i32 1 + %1132 = sub i32 16, %1131 + %1133 = icmp ult i32 16, %1131 + %1134 = icmp ult i32 %1132, 4 + %1135 = or i1 %1133, %1134 + br i1 %1135, label %trap.loopexit70, label %1136 + +1136: ; preds = %1130 + %arrayidx680.imag = load double, double* %arrayidx680.imagp, align 1 + %1137 = load i64, i64* %v_loop_2541, align 1 + %idxprom681 = trunc i64 %1137 to i32 + %arrayidx682.idx = mul i32 %idxprom681, 8 + %arrayidx682 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom681 + %arrayidx682.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx682, i32 0, i32 0 + %1138 = sub i32 16, %arrayidx682.idx + %1139 = icmp ult i32 16, %arrayidx682.idx + %1140 = icmp ult i32 %1138, 4 + %1141 = or i1 %1139, %1140 + br i1 %1141, label %trap.loopexit70, label %1142 + +1142: ; preds = %1136 + %arrayidx682.real = load double, double* %arrayidx682.realp, align 1 + %1143 = add i32 %arrayidx682.idx, 4 + %arrayidx682.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx682, i32 0, i32 1 + %1144 = sub i32 16, %1143 + %1145 = icmp ult i32 16, %1143 + %1146 = icmp ult i32 %1144, 4 + %1147 = or i1 %1145, %1146 + br i1 %1147, label %trap.loopexit70, label %1148 + +1148: ; preds = %1142 + %arrayidx682.imag = load double, double* 
%arrayidx682.imagp, align 1 + %1149 = load i64, i64* %v_loop_2541, align 1 + %idxprom683 = trunc i64 %1149 to i32 + %arrayidx684.idx = mul i32 %idxprom683, 8 + %arrayidx684 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom683 + %arrayidx684.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx684, i32 0, i32 0 + %1150 = sub i32 16, %arrayidx684.idx + %1151 = icmp ult i32 16, %arrayidx684.idx + %1152 = icmp ult i32 %1150, 4 + %1153 = or i1 %1151, %1152 + br i1 %1153, label %trap.loopexit70, label %1154 + +1154: ; preds = %1148 + %arrayidx684.real = load double, double* %arrayidx684.realp, align 1 + %1155 = add i32 %arrayidx684.idx, 4 + %arrayidx684.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx684, i32 0, i32 1 + %1156 = sub i32 16, %1155 + %1157 = icmp ult i32 16, %1155 + %1158 = icmp ult i32 %1156, 4 + %1159 = or i1 %1157, %1158 + br i1 %1159, label %trap.loopexit70, label %1160 + +1160: ; preds = %1154 + %arrayidx684.imag = load double, double* %arrayidx684.imagp, align 1 + %1161 = load i64, i64* %v_loop_2541, align 1 + %idxprom685 = trunc i64 %1161 to i32 + %arrayidx686.idx = mul i32 %idxprom685, 8 + %arrayidx686 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_583, i32 0, i32 %idxprom685 + %arrayidx686.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx686, i32 0, i32 0 + %1162 = sub i32 16, %arrayidx686.idx + %1163 = icmp ult i32 16, %arrayidx686.idx + %1164 = icmp ult i32 %1162, 4 + %1165 = or i1 %1163, %1164 + br i1 %1165, label %trap.loopexit70, label %1166 + +1166: ; preds = %1160 + %arrayidx686.real = load double, double* %arrayidx686.realp, align 1 + %1167 = add i32 %arrayidx686.idx, 4 + %arrayidx686.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx686, i32 0, i32 1 + %1168 = sub i32 16, %1167 + %1169 = icmp ult i32 16, %1167 + %1170 = icmp ult i32 %1168, 4 + %1171 = 
or i1 %1169, %1170 + br i1 %1171, label %trap.loopexit70, label %1172 + +1172: ; preds = %1166 + %arrayidx686.imag = load double, double* %arrayidx686.imagp, align 1 + %add.r687 = fadd double %arrayidx684.real, %arrayidx686.real + %add.i688 = fadd double %arrayidx684.imag, %arrayidx686.imag + %sub.r689 = fsub double %arrayidx682.real, %add.r687 + %sub.i690 = fsub double %arrayidx682.imag, %add.i688 + %1173 = load i64, i64* %v_loop_2541, align 1 + %idxprom691 = trunc i64 %1173 to i32 + %arrayidx692.idx = mul i32 %idxprom691, 8 + %arrayidx692 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom691 + %arrayidx692.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx692, i32 0, i32 0 + %1174 = sub i32 16, %arrayidx692.idx + %1175 = icmp ult i32 16, %arrayidx692.idx + %1176 = icmp ult i32 %1174, 4 + %1177 = or i1 %1175, %1176 + br i1 %1177, label %trap.loopexit70, label %1178 + +1178: ; preds = %1172 + %arrayidx692.real = load double, double* %arrayidx692.realp, align 1 + %1179 = add i32 %arrayidx692.idx, 4 + %arrayidx692.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx692, i32 0, i32 1 + %1180 = sub i32 16, %1179 + %1181 = icmp ult i32 16, %1179 + %1182 = icmp ult i32 %1180, 4 + %1183 = or i1 %1181, %1182 + br i1 %1183, label %trap.loopexit70, label %1184 + +1184: ; preds = %1178 + %arrayidx692.imag = load double, double* %arrayidx692.imagp, align 1 + %mul_ac693 = fmul double %sub.r689, %arrayidx692.real + %mul_bd694 = fmul double %sub.i690, %arrayidx692.imag + %mul_ad695 = fmul double %sub.r689, %arrayidx692.imag + %mul_bc696 = fmul double %sub.i690, %arrayidx692.real + %mul_r697 = fsub double %mul_ac693, %mul_bd694 + %mul_i698 = fadd double %mul_ad695, %mul_bc696 + %isnan_cmp699 = fcmp uno double %mul_r697, %mul_r697 + br i1 %isnan_cmp699, label %complex_mul_imag_nan700, label %complex_mul_cont704 + +complex_mul_imag_nan700: ; preds = %1184 + %isnan_cmp701 = fcmp uno 
double %mul_i698, %mul_i698 + br i1 %isnan_cmp701, label %complex_mul_libcall702, label %complex_mul_cont704 + +complex_mul_libcall702: ; preds = %complex_mul_imag_nan700 + call void @__muldc3({ double, double }* align 1 %tmp703, double %sub.r689, double %sub.i690, double %arrayidx692.real, double %arrayidx692.imag) #1 + %tmp703.real = load double, double* %tmp703.realp, align 1 + %tmp703.imag = load double, double* %tmp703.imagp, align 1 + br label %complex_mul_cont704 + +complex_mul_cont704: ; preds = %complex_mul_libcall702, %complex_mul_imag_nan700, %1184 + %real_mul_phi705 = phi double [ %mul_r697, %1184 ], [ %mul_r697, %complex_mul_imag_nan700 ], [ %tmp703.real, %complex_mul_libcall702 ] + %imag_mul_phi706 = phi double [ %mul_i698, %1184 ], [ %mul_i698, %complex_mul_imag_nan700 ], [ %tmp703.imag, %complex_mul_libcall702 ] + %mul_ac707 = fmul double %arrayidx680.real, %real_mul_phi705 + %mul_bd708 = fmul double %arrayidx680.imag, %imag_mul_phi706 + %mul_ad709 = fmul double %arrayidx680.real, %imag_mul_phi706 + %mul_bc710 = fmul double %arrayidx680.imag, %real_mul_phi705 + %mul_r711 = fsub double %mul_ac707, %mul_bd708 + %mul_i712 = fadd double %mul_ad709, %mul_bc710 + %isnan_cmp713 = fcmp uno double %mul_r711, %mul_r711 + br i1 %isnan_cmp713, label %complex_mul_imag_nan714, label %complex_mul_cont718 + +complex_mul_imag_nan714: ; preds = %complex_mul_cont704 + %isnan_cmp715 = fcmp uno double %mul_i712, %mul_i712 + br i1 %isnan_cmp715, label %complex_mul_libcall716, label %complex_mul_cont718 + +complex_mul_libcall716: ; preds = %complex_mul_imag_nan714 + call void @__muldc3({ double, double }* align 1 %tmp717, double %arrayidx680.real, double %arrayidx680.imag, double %real_mul_phi705, double %imag_mul_phi706) #1 + %tmp717.real = load double, double* %tmp717.realp, align 1 + %tmp717.imag = load double, double* %tmp717.imagp, align 1 + br label %complex_mul_cont718 + +complex_mul_cont718: ; preds = %complex_mul_libcall716, %complex_mul_imag_nan714, 
%complex_mul_cont704 + %real_mul_phi719 = phi double [ %mul_r711, %complex_mul_cont704 ], [ %mul_r711, %complex_mul_imag_nan714 ], [ %tmp717.real, %complex_mul_libcall716 ] + %imag_mul_phi720 = phi double [ %mul_i712, %complex_mul_cont704 ], [ %mul_i712, %complex_mul_imag_nan714 ], [ %tmp717.imag, %complex_mul_libcall716 ] + %1185 = load i64, i64* %v_loop_2541, align 1 + %idxprom721 = trunc i64 %1185 to i32 + %arrayidx722.idx = mul i32 %idxprom721, 8 + %arrayidx722 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom721 + %arrayidx722.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx722, i32 0, i32 0 + %1186 = add i32 %arrayidx722.idx, 4 + %arrayidx722.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx722, i32 0, i32 1 + %1187 = sub i32 16, %arrayidx722.idx + %1188 = icmp ult i32 16, %arrayidx722.idx + %1189 = icmp ult i32 %1187, 4 + %1190 = or i1 %1188, %1189 + br i1 %1190, label %trap.loopexit70, label %1191 + +1191: ; preds = %complex_mul_cont718 + store double %real_mul_phi719, double* %arrayidx722.realp, align 1 + %1192 = sub i32 16, %1186 + %1193 = icmp ult i32 16, %1186 + %1194 = icmp ult i32 %1192, 4 + %1195 = or i1 %1193, %1194 + br i1 %1195, label %trap.loopexit70, label %1196 + +1196: ; preds = %1191 + store double %imag_mul_phi720, double* %arrayidx722.imagp, align 1 + %1197 = load i64, i64* %v_loop_2541, align 1 + %idxprom723 = trunc i64 %1197 to i32 + %arrayidx724.idx = mul i32 %idxprom723, 8 + %arrayidx724 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom723 + %arrayidx724.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx724, i32 0, i32 0 + %1198 = sub i32 16, %arrayidx724.idx + %1199 = icmp ult i32 16, %arrayidx724.idx + %1200 = icmp ult i32 %1198, 4 + %1201 = or i1 %1199, %1200 + br i1 %1201, label %trap.loopexit70, label %1202 + +1202: ; preds = %1196 + 
%arrayidx724.real = load double, double* %arrayidx724.realp, align 1 + %1203 = add i32 %arrayidx724.idx, 4 + %arrayidx724.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx724, i32 0, i32 1 + %1204 = sub i32 16, %1203 + %1205 = icmp ult i32 16, %1203 + %1206 = icmp ult i32 %1204, 4 + %1207 = or i1 %1205, %1206 + br i1 %1207, label %trap.loopexit70, label %1208 + +1208: ; preds = %1202 + %arrayidx724.imag = load double, double* %arrayidx724.imagp, align 1 + %1209 = load i64, i64* %v_loop_2541, align 1 + %idxprom725 = trunc i64 %1209 to i32 + %arrayidx726.idx = mul i32 %idxprom725, 8 + %arrayidx726 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_577, i32 0, i32 %idxprom725 + %arrayidx726.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx726, i32 0, i32 0 + %1210 = sub i32 16, %arrayidx726.idx + %1211 = icmp ult i32 16, %arrayidx726.idx + %1212 = icmp ult i32 %1210, 4 + %1213 = or i1 %1211, %1212 + br i1 %1213, label %trap.loopexit70, label %1214 + +1214: ; preds = %1208 + %arrayidx726.real = load double, double* %arrayidx726.realp, align 1 + %1215 = add i32 %arrayidx726.idx, 4 + %arrayidx726.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx726, i32 0, i32 1 + %1216 = sub i32 16, %1215 + %1217 = icmp ult i32 16, %1215 + %1218 = icmp ult i32 %1216, 4 + %1219 = or i1 %1217, %1218 + br i1 %1219, label %trap.loopexit70, label %1220 + +1220: ; preds = %1214 + %arrayidx726.imag = load double, double* %arrayidx726.imagp, align 1 + %mul_ac727 = fmul double %arrayidx724.real, %arrayidx726.real + %mul_bd728 = fmul double %arrayidx724.imag, %arrayidx726.imag + %mul_ad729 = fmul double %arrayidx724.real, %arrayidx726.imag + %mul_bc730 = fmul double %arrayidx724.imag, %arrayidx726.real + %mul_r731 = fsub double %mul_ac727, %mul_bd728 + %mul_i732 = fadd double %mul_ad729, %mul_bc730 + %isnan_cmp733 = fcmp uno double %mul_r731, %mul_r731 + br i1 %isnan_cmp733, label 
%complex_mul_imag_nan734, label %complex_mul_cont738 + +complex_mul_imag_nan734: ; preds = %1220 + %isnan_cmp735 = fcmp uno double %mul_i732, %mul_i732 + br i1 %isnan_cmp735, label %complex_mul_libcall736, label %complex_mul_cont738 + +complex_mul_libcall736: ; preds = %complex_mul_imag_nan734 + call void @__muldc3({ double, double }* align 1 %tmp737, double %arrayidx724.real, double %arrayidx724.imag, double %arrayidx726.real, double %arrayidx726.imag) #1 + %tmp737.real = load double, double* %tmp737.realp, align 1 + %tmp737.imag = load double, double* %tmp737.imagp, align 1 + br label %complex_mul_cont738 + +complex_mul_cont738: ; preds = %complex_mul_libcall736, %complex_mul_imag_nan734, %1220 + %real_mul_phi739 = phi double [ %mul_r731, %1220 ], [ %mul_r731, %complex_mul_imag_nan734 ], [ %tmp737.real, %complex_mul_libcall736 ] + %imag_mul_phi740 = phi double [ %mul_i732, %1220 ], [ %mul_i732, %complex_mul_imag_nan734 ], [ %tmp737.imag, %complex_mul_libcall736 ] + %1221 = load i64, i64* %v_loop_2541, align 1 + %idxprom741 = trunc i64 %1221 to i32 + %arrayidx742.idx = mul i32 %idxprom741, 8 + %arrayidx742 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom741 + %arrayidx742.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx742, i32 0, i32 0 + %1222 = add i32 %arrayidx742.idx, 4 + %arrayidx742.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx742, i32 0, i32 1 + %1223 = sub i32 16, %arrayidx742.idx + %1224 = icmp ult i32 16, %arrayidx742.idx + %1225 = icmp ult i32 %1223, 4 + %1226 = or i1 %1224, %1225 + br i1 %1226, label %trap.loopexit70, label %1227 + +1227: ; preds = %complex_mul_cont738 + store double %real_mul_phi739, double* %arrayidx742.realp, align 1 + %1228 = sub i32 16, %1222 + %1229 = icmp ult i32 16, %1222 + %1230 = icmp ult i32 %1228, 4 + %1231 = or i1 %1229, %1230 + br i1 %1231, label %trap.loopexit70, label %1232 + +1232: ; preds = %1227 + store 
double %imag_mul_phi740, double* %arrayidx742.imagp, align 1 + %1233 = load i64, i64* %v_loop_2541, align 1 + %idxprom743 = trunc i64 %1233 to i32 + %arrayidx744.idx = mul i32 %idxprom743, 8 + %arrayidx744 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_581, i32 0, i32 %idxprom743 + %arrayidx744.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx744, i32 0, i32 0 + %1234 = sub i32 16, %arrayidx744.idx + %1235 = icmp ult i32 16, %arrayidx744.idx + %1236 = icmp ult i32 %1234, 4 + %1237 = or i1 %1235, %1236 + br i1 %1237, label %trap.loopexit70, label %1238 + +1238: ; preds = %1232 + %arrayidx744.real = load double, double* %arrayidx744.realp, align 1 + %1239 = add i32 %arrayidx744.idx, 4 + %arrayidx744.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx744, i32 0, i32 1 + %1240 = sub i32 16, %1239 + %1241 = icmp ult i32 16, %1239 + %1242 = icmp ult i32 %1240, 4 + %1243 = or i1 %1241, %1242 + br i1 %1243, label %trap.loopexit70, label %1244 + +1244: ; preds = %1238 + %arrayidx744.imag = load double, double* %arrayidx744.imagp, align 1 + %1245 = load i64, i64* %v_loop_2541, align 1 + %idxprom745 = trunc i64 %1245 to i32 + %arrayidx746.idx = mul i32 %idxprom745, 8 + %arrayidx746 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_579, i32 0, i32 %idxprom745 + %arrayidx746.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx746, i32 0, i32 0 + %1246 = sub i32 16, %arrayidx746.idx + %1247 = icmp ult i32 16, %arrayidx746.idx + %1248 = icmp ult i32 %1246, 4 + %1249 = or i1 %1247, %1248 + br i1 %1249, label %trap.loopexit70, label %1250 + +1250: ; preds = %1244 + %arrayidx746.real = load double, double* %arrayidx746.realp, align 1 + %1251 = add i32 %arrayidx746.idx, 4 + %arrayidx746.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx746, i32 0, i32 1 + %1252 = sub i32 16, %1251 + %1253 = icmp ult i32 16, %1251 + %1254 = 
icmp ult i32 %1252, 4 + %1255 = or i1 %1253, %1254 + br i1 %1255, label %trap.loopexit70, label %1256 + +1256: ; preds = %1250 + %arrayidx746.imag = load double, double* %arrayidx746.imagp, align 1 + %add.r747 = fadd double %arrayidx744.real, %arrayidx746.real + %add.i748 = fadd double %arrayidx744.imag, %arrayidx746.imag + %1257 = load i64, i64* %v_loop_2541, align 1 + %idxprom749 = trunc i64 %1257 to i32 + %arrayidx750.idx = mul i32 %idxprom749, 8 + %arrayidx750 = getelementptr [2 x { double, double }], [2 x { double, double }]* @v_583, i32 0, i32 %idxprom749 + %arrayidx750.realp = getelementptr inbounds { double, double }, { double, double }* %arrayidx750, i32 0, i32 0 + %1258 = add i32 %arrayidx750.idx, 4 + %arrayidx750.imagp = getelementptr inbounds { double, double }, { double, double }* %arrayidx750, i32 0, i32 1 + %1259 = sub i32 16, %arrayidx750.idx + %1260 = icmp ult i32 16, %arrayidx750.idx + %1261 = icmp ult i32 %1259, 4 + %1262 = or i1 %1260, %1261 + br i1 %1262, label %trap.loopexit70, label %1263 + +1263: ; preds = %1256 + store double %add.r747, double* %arrayidx750.realp, align 1 + %1264 = sub i32 16, %1258 + %1265 = icmp ult i32 16, %1258 + %1266 = icmp ult i32 %1264, 4 + %1267 = or i1 %1265, %1266 + br i1 %1267, label %trap.loopexit70, label %for.inc751 + +for.inc751: ; preds = %1263 + store double %add.i748, double* %arrayidx750.imagp, align 1 + %1268 = load i64, i64* %v_loop_2541, align 1 + %inc752 = add i64 %1268, 1 + store i64 %inc752, i64* %v_loop_2541, align 1 + %cmp543 = icmp ult i64 %inc752, 2 + br i1 %cmp543, label %for.body546, label %for.inc761 + +for.inc761: ; preds = %for.inc751 + store i32 23, i32* %cleanup.dest.slot, align 1 + call void @llvm.lifetime.end.p0i8(i64 4, i8* %800) #1 + %1269 = load volatile i24, i24* @v_266, align 1 + %1270 = load i16*, i16** @v_8, align 1 + %1271 = load i16, i16* %1270, align 1 + %rem754 = urem i16 %1271, 6 + %idxprom755 = zext i16 %rem754 to i32 + %arrayidx756 = getelementptr [6 x i24], [6 x i24]* 
@v_585, i32 0, i32 %idxprom755 + store volatile i24 %1269, i24* %arrayidx756, align 1 + call void @llvm.lifetime.start.p0i8(i64 2, i8* %801) #1 + %1272 = load i24, i24* @v_403, align 1 + %mul759 = call i24 @llvm.smul.fix.i24(i24 707, i24 %1272, i32 15) + %unsclear760 = and i24 %mul759, 8388607 + store i24 %unsclear760, i24* %v_586, align 1 + %1273 = load volatile i16, i16* @v_205, align 1 + store volatile i16 %1273, i16* @v_206, align 1 + call void @llvm.lifetime.end.p0i8(i64 2, i8* %802) #1 + call void @llvm.lifetime.end.p0i8(i64 1, i8* %803) #1 + call void @llvm.lifetime.end.p0i8(i64 2, i8* %804) #1 + %inc762 = add i16 %inc7629092, 1 + %cmp528 = icmp ult i16 %inc762, 73 + br i1 %cmp528, label %for.body531, label %for.end763 + +for.end763: ; preds = %for.inc761 + %inc76290.lcssa = phi i16 [ %inc762, %for.inc761 ] + store i16 %inc76290.lcssa, i16* %v_562, align 1 + store i32 20, i32* %cleanup.dest.slot, align 1 + %1274 = bitcast i16* %v_562 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1274) #1 + %1275 = bitcast { i16, i16 }* %v_587 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %1275) #1 + %1276 = load { i16, i16 }*, { i16, i16 }** @v_2, align 1 + %.realp764 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %1276, i32 0, i32 0 + %.real765 = load i16, i16* %.realp764, align 1 + %.imagp766 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %1276, i32 0, i32 1 + %.imag767 = load i16, i16* %.imagp766, align 1 + %v_587.realp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_587, i32 0, i32 0 + %v_587.imagp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_587, i32 0, i32 1 + store i16 %.real765, i16* %v_587.realp, align 1 + store i16 %.imag767, i16* %v_587.imagp, align 1 + %1277 = load i16, i16* @v_14, align 1 + %mul768 = call i16 @llvm.smul.fix.i16(i16 3, i16 %1277, i32 15) + %unsclear769 = and i16 %mul768, 32767 + %1278 = load i16, i16* @v_14, align 1 + %mul770 = call i16 @llvm.smul.fix.i16(i16 %unsclear769, i16 %1278, i32 15) + 
%unsclear771 = and i16 %mul770, 32767 + %resize772 = zext i16 %unsclear771 to i24 + store volatile i24 %resize772, i24* @v_266, align 1 + %1279 = bitcast i16* %v_588 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1279) #1 + store i16 75, i16* %v_588, align 1 + %1280 = load { i24, i24 }***, { i24, i24 }**** @v_592, align 1 + %1281 = bitcast { float, float }* %v_593 to i8* + %v_593.realp = getelementptr inbounds { float, float }, { float, float }* %v_593, i32 0, i32 0 + %v_593.imagp = getelementptr inbounds { float, float }, { float, float }* %v_593, i32 0, i32 1 + %1282 = load [9 x { i24, i24 }*]*, [9 x { i24, i24 }*]** @v_597, align 1 + %1283 = bitcast i64* %y0782 to i8* + %1284 = load i64, i64* @v_456, align 1 + %1285 = bitcast i64* %y1783 to i8* + %1286 = bitcast i64* %y1783 to i8* + %1287 = bitcast i64* %y0782 to i8* + %1288 = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_43, i32 0, i32 1), align 1 + %rem798 = urem i32 %1288, 3 + %arrayidx799.idx = mul i32 %rem798, 4 + %arrayidx799 = getelementptr [3 x { i24, i24 }], [3 x { i24, i24 }]* @v_538, i32 0, i32 %rem798 + %arrayidx799.realp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %arrayidx799, i32 0, i32 0 + %1289 = add i32 %arrayidx799.idx, 2 + %arrayidx799.imagp = getelementptr inbounds { i24, i24 }, { i24, i24 }* %arrayidx799, i32 0, i32 1 + %1290 = sub i32 12, %arrayidx799.idx + %1291 = icmp ult i32 12, %arrayidx799.idx + %1292 = icmp ult i32 %1290, 2 + %1293 = or i1 %1291, %1292 + %1294 = sub i32 12, %1289 + %1295 = icmp ult i32 12, %1289 + %1296 = icmp ult i32 %1294, 2 + %1297 = or i1 %1295, %1296 + %v_154.real = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_154, i32 0, i32 0), align 1 + %v_154.imag = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_154, i32 0, i32 1), align 1 + %1298 = bitcast { float, float }* %v_593 to i8* + %tmp784.promoted = load i64, i64* %tmp784, align 1 + %v_588.promoted = load i16, i16* %v_588, 
align 1 + br label %for.body777 + +for.body777: ; preds = %for.inc800, %for.end763 + %inc8019496 = phi i16 [ %v_588.promoted, %for.end763 ], [ %inc801, %for.inc800 ] + %1299 = load { i24, i24 }**, { i24, i24 }*** %1280, align 1 + %1300 = load { i24, i24 }*, { i24, i24 }** %1299, align 1 + %.realp778 = getelementptr inbounds { i24, i24 }, { i24, i24 }* %1300, i32 0, i32 0 + %.real779 = load i24, i24* %.realp778, align 1 + %.imagp780 = getelementptr inbounds { i24, i24 }, { i24, i24 }* %1300, i32 0, i32 1 + %.imag781 = load i24, i24* %.imagp780, align 1 + store i24 %.real779, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_589, i32 0, i32 0), align 1 + store i24 %.imag781, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_589, i32 0, i32 1), align 1 + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1281) #1 + store volatile float 0xC087BE3C40000000, float* %v_593.realp, align 1 + store volatile float 0x405F0F1700000000, float* %v_593.imagp, align 1 + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1283) #1 + store i64 %1284, i64* %y0782, align 1 + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1285) #1 + store i64 274877906943, i64* %y1783, align 1 + %1301 = load i64, i64* %y1783, align 1 + %cmp785 = icmp uge i64 %1301, 64 + br i1 %cmp785, label %cond.true787, label %cond.false788 + +cond.true787: ; preds = %for.body777 + %1302 = load i64, i64* %y0782, align 1 + br label %cond.end789 + +cond.false788: ; preds = %for.body777 + %1303 = load i64, i64* %y0782, align 1 + %1304 = load i64, i64* %y1783, align 1 + %shr = lshr i64 %1303, %1304 + br label %cond.end789 + +cond.end789: ; preds = %cond.false788, %cond.true787 + %cond790 = phi i64 [ %1302, %cond.true787 ], [ %shr, %cond.false788 ] + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1286) #1 + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1287) #1 + %rem791 = urem i64 %cond790, 9 + %idxprom792 = trunc i64 %rem791 to i32 + %arrayidx793 = getelementptr [9 x { i24, i24 }*], [9 x { i24, i24 }*]* 
%1282, i32 0, i32 %idxprom792 + %1305 = load { i24, i24 }*, { i24, i24 }** %arrayidx793, align 1 + %.realp794 = getelementptr inbounds { i24, i24 }, { i24, i24 }* %1305, i32 0, i32 0 + %.real795 = load i24, i24* %.realp794, align 1 + %.imagp796 = getelementptr inbounds { i24, i24 }, { i24, i24 }* %1305, i32 0, i32 1 + %.imag797 = load i24, i24* %.imagp796, align 1 + br i1 %1293, label %trap.loopexit69, label %1306 + +1306: ; preds = %cond.end789 + store i24 %.real795, i24* %arrayidx799.realp, align 1 + br i1 %1297, label %trap.loopexit69, label %for.inc800 + +for.inc800: ; preds = %1306 + store i24 %.imag797, i24* %arrayidx799.imagp, align 1 + store i16 %v_154.real, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_110, i32 0, i32 0), align 1 + store i16 %v_154.imag, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_110, i32 0, i32 1), align 1 + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1298) #1 + %inc801 = add i16 %inc8019496, 1 + %cmp774 = icmp slt i16 %inc801, 78 + br i1 %cmp774, label %for.body777, label %for.end802 + +for.end802: ; preds = %for.inc800 + %inc80194.lcssa = phi i16 [ %inc801, %for.inc800 ] + %.lcssa93 = phi i64 [ %cond790, %for.inc800 ] + store i64 %.lcssa93, i64* %tmp784, align 1 + store i16 %inc80194.lcssa, i16* %v_588, align 1 + store i32 26, i32* %cleanup.dest.slot, align 1 + %1307 = bitcast i16* %v_588 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1307) #1 + %1308 = load { i16, i16 }*, { i16, i16 }** @v_41, align 1 + store { i16, i16 }* %1308, { i16, i16 }** @v_599, align 1 + %v_152.real = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_152, i32 0, i32 0), align 1 + %v_152.imag = load volatile float, float* getelementptr inbounds ({ float, float }, { float, float }* @v_152, i32 0, i32 1), align 1 + %1309 = load i24, i24* @v_420, align 1 + %v_489.real = load i64, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_489, i32 0, i32 0), align 1 + %v_489.imag = 
load i64, i64* getelementptr inbounds ({ i64, i64 }, { i64, i64 }* @v_489, i32 0, i32 1), align 1 + %1310 = load i32, i32* @v_600, align 1 + %indirect-arg-temp803.realp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp803, i32 0, i32 0 + %indirect-arg-temp803.imagp = getelementptr inbounds { float, float }, { float, float }* %indirect-arg-temp803, i32 0, i32 1 + store float %v_152.real, float* %indirect-arg-temp803.realp, align 1 + store float %v_152.imag, float* %indirect-arg-temp803.imagp, align 1 + %indirect-arg-temp804.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %indirect-arg-temp804, i32 0, i32 0 + %indirect-arg-temp804.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %indirect-arg-temp804, i32 0, i32 1 + store i64 %v_489.real, i64* %indirect-arg-temp804.realp, align 1 + store i64 %v_489.imag, i64* %indirect-arg-temp804.imagp, align 1 + %1311 = bitcast { i40, i40 }* %v_623 to i8* + call void @llvm.lifetime.start.p0i8(i64 6, i8* %1311) #1 + %v_623.realp = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_623, i32 0, i32 0 + %v_623.imagp = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_623, i32 0, i32 1 + store i40 -49213663653, i40* %v_623.realp, align 1 + store i40 -47942585777, i40* %v_623.imagp, align 1 + %1312 = bitcast double* %v_627 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1312) #1 + %1313 = load double**, double*** @v_626, align 1 + %1314 = load double*, double** %1313, align 1 + %1315 = load double, double* %1314, align 1 + %1316 = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @v_64, i32 0, i32 0), align 1 + %conv805 = sitofp i32 %1316 to double + %add806 = fadd double %1315, %conv805 + store double %add806, double* %v_627, align 1 + %v_334.real = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_334, i32 0, i32 0), align 1 + %v_334.imag = load i16, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_334, i32 0, i32 1), 
align 1 + store i16 %v_334.real, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_378, i32 0, i32 0), align 1 + store i16 %v_334.imag, i16* getelementptr inbounds ({ i16, i16 }, { i16, i16 }* @v_378, i32 0, i32 1), align 1 + %1317 = bitcast i24* %v_628 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %1317) #1 + store volatile i24 2105, i24* %v_628, align 1 + %1318 = load i64*, i64** @v_633, align 1 + %1319 = load i64, i64* %1318, align 1 + %rem807 = urem i64 %1319, 4 + %idxprom808 = trunc i64 %rem807 to i32 + %idxprom808.c = trunc i32 %idxprom808 to i16 + %1320 = add i16 0, %idxprom808.c + %arrayidx809 = getelementptr [4 x i24*], [4 x i24*]* @v_631, i32 0, i32 %idxprom808 + %1321 = sub i16 4, %1320 + %1322 = load i24*, i24** %arrayidx809, align 1 + %1323 = load i24, i24* %1322, align 1 + store i24 %1323, i24* @v_349, align 1 + store float 0x4088B163A0000000, float* @v_440, align 1 + %v_559.realp810 = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_559, i32 0, i32 0 + %v_559.real = load i40, i40* %v_559.realp810, align 1 + %v_559.imagp811 = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_559, i32 0, i32 1 + %v_559.imag = load i40, i40* %v_559.imagp811, align 1 + %v_623.realp812 = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_623, i32 0, i32 0 + %v_623.imagp813 = getelementptr inbounds { i40, i40 }, { i40, i40 }* %v_623, i32 0, i32 1 + store i40 %v_559.real, i40* %v_623.realp812, align 1 + store i40 %v_559.imag, i40* %v_623.imagp813, align 1 + %1324 = bitcast { i16, i16 }* %v_637 to i8* + call void @llvm.lifetime.start.p0i8(i64 2, i8* %1324) #1 + %1325 = load { i16, i16 }**, { i16, i16 }*** @v_636, align 1 + %1326 = load { i16, i16 }*, { i16, i16 }** %1325, align 1 + %.realp814 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %1326, i32 0, i32 0 + %.real815 = load i16, i16* %.realp814, align 1 + %.imagp816 = getelementptr inbounds { i16, i16 }, { i16, i16 }* %1326, i32 0, i32 1 + %.imag817 = load i16, i16* %.imagp816, align 
1 + %v_637.realp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_637, i32 0, i32 0 + %v_637.imagp = getelementptr inbounds { i16, i16 }, { i16, i16 }* %v_637, i32 0, i32 1 + store i16 %.real815, i16* %v_637.realp, align 1 + store i16 %.imag817, i16* %v_637.imagp, align 1 + %1327 = bitcast %struct.s_2* %v_640 to i8* + call void @llvm.lifetime.start.p0i8(i64 14, i8* %1327) #1 + %f0 = getelementptr inbounds %struct.s_2, %struct.s_2* %v_640, i32 0, i32 0 + %1328 = bitcast double* %y0818 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1328) #1 + %1329 = load double*, double** @v_639, align 1 + %1330 = load volatile double, double* %1329, align 1 + store double %1330, double* %y0818, align 1 + %1331 = bitcast double* %y1819 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1331) #1 + %1332 = bitcast double* %y0820 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1332) #1 + %1333 = load volatile double, double* @v_358, align 1 + store double %1333, double* %y0820, align 1 + %1334 = bitcast double* %y1821 to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* %1334) #1 + %1335 = load volatile double, double* @v_638, align 1 + store double %1335, double* %y1821, align 1 + %1336 = load double, double* %y0820, align 1 + %cmp823 = fcmp oeq double %1336, 0.000000e+00 + br i1 %cmp823, label %land.lhs.true825, label %cond.false829 + +land.lhs.true825: ; preds = %for.end802 + %1337 = load double, double* %y1821, align 1 + %cmp826 = fcmp oeq double %1337, 0.000000e+00 + br i1 %cmp826, label %cond.true828, label %cond.false829 + +cond.true828: ; preds = %land.lhs.true825 + %1338 = load double, double* %y0820, align 1 + br label %cond.end831 + +cond.false829: ; preds = %land.lhs.true825, %for.end802 + %1339 = load double, double* %y0820, align 1 + %1340 = load double, double* %y1821, align 1 + %div830 = fdiv double %1339, %1340 + br label %cond.end831 + +cond.end831: ; preds = %cond.false829, %cond.true828 + %cond832 = phi double [ %1338, 
%cond.true828 ], [ %div830, %cond.false829 ] + store double %cond832, double* %tmp822, align 1 + %1341 = bitcast double* %y1821 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1341) #1 + %1342 = bitcast double* %y0820 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1342) #1 + %1343 = load double, double* %tmp822, align 1 + store double %1343, double* %y1819, align 1 + %1344 = load double, double* %y0818, align 1 + %cmp834 = fcmp oeq double %1344, 0.000000e+00 + br i1 %cmp834, label %land.lhs.true836, label %cond.false840 + +land.lhs.true836: ; preds = %cond.end831 + %1345 = load double, double* %y1819, align 1 + %cmp837 = fcmp oeq double %1345, 0.000000e+00 + br i1 %cmp837, label %cond.true839, label %cond.false840 + +cond.true839: ; preds = %land.lhs.true836 + %1346 = load double, double* %y0818, align 1 + br label %cond.end842 + +cond.false840: ; preds = %land.lhs.true836, %cond.end831 + %1347 = load double, double* %y0818, align 1 + %1348 = load double, double* %y1819, align 1 + %div841 = fdiv double %1347, %1348 + br label %cond.end842 + +cond.end842: ; preds = %cond.false840, %cond.true839 + %cond843 = phi double [ %1346, %cond.true839 ], [ %div841, %cond.false840 ] + store double %cond843, double* %tmp833, align 1 + %1349 = bitcast double* %y1819 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1349) #1 + %1350 = bitcast double* %y0818 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1350) #1 + %1351 = load double, double* %tmp833, align 1 + store volatile double %1351, double* %f0, align 1 + %f1 = getelementptr inbounds %struct.s_2, %struct.s_2* %v_640, i32 0, i32 1 + %1352 = load volatile double, double* @v_133, align 1 + store volatile double %1352, double* %f1, align 1 + %f2 = getelementptr inbounds %struct.s_2, %struct.s_2* %v_640, i32 0, i32 2 + %1353 = load volatile i24, i24* getelementptr inbounds ({ i24, i24 }, { i24, i24 }* @v_146, i32 0, i32 1), align 1 + store volatile i24 %1353, i24* %f2, align 1 + %f3 = getelementptr 
inbounds %struct.s_2, %struct.s_2* %v_640, i32 0, i32 3 + %f3.realp = getelementptr inbounds { float, float }, { float, float }* %f3, i32 0, i32 0 + %f3.imagp = getelementptr inbounds { float, float }, { float, float }* %f3, i32 0, i32 1 + store volatile float 0xC06A901220000000, float* %f3.realp, align 1 + store volatile float 0xC08EFBAD40000000, float* %f3.imagp, align 1 + %1354 = load i64, i64* @v_4, align 1 + %conv844 = uitofp i64 %1354 to double + call void @modify_checksum(double %conv844) + %1355 = load i16, i16* @v_6, align 1 + %conv845 = uitofp i16 %1355 to double + call void @modify_checksum(double %conv845) + %1356 = load i16, i16* @v_7, align 1 + %conv846 = uitofp i16 %1356 to double + call void @modify_checksum(double %conv846) + %1357 = load i24, i24* @v_12, align 1 + %1358 = uitofp i24 %1357 to double + %1359 = fmul double %1358, 0x3F00000000000000 + call void @modify_checksum(double %1359) + %1360 = load i16, i16* @v_14, align 1 + %1361 = uitofp i16 %1360 to double + %1362 = fmul double %1361, 0x3F00000000000000 + call void @modify_checksum(double %1362) + %1363 = load i32, i32* @v_16, align 1 + %conv847 = sitofp i32 %1363 to double + call void @modify_checksum(double %conv847) + call void @modify_checksum(double -1.638200e+04) + %1364 = load i40, i40* @v_32, align 1 + %1365 = sitofp i40 %1364 to double + %1366 = fmul double %1365, 0x3E00000000000000 + call void @modify_checksum(double %1366) + %1367 = load i32, i32* @v_47, align 1 + %conv848 = uitofp i32 %1367 to double + call void @modify_checksum(double %conv848) + %1368 = load i16, i16* @v_48, align 1 + %conv849 = sitofp i16 %1368 to double + call void @modify_checksum(double %conv849) + call void @modify_checksum(double -2.890100e+04) + %1369 = load volatile i16, i16* @v_57, align 1 + %conv850 = uitofp i16 %1369 to double + call void @modify_checksum(double %conv850) + %1370 = load volatile i16, i16* @v_69, align 1 + %conv851 = uitofp i16 %1370 to double + call void @modify_checksum(double 
%conv851) + %1371 = load volatile i16, i16* @v_71, align 1 + %conv852 = uitofp i16 %1371 to double + call void @modify_checksum(double %conv852) + %1372 = load i64, i64* @v_73, align 1 + %conv853 = sitofp i64 %1372 to double + call void @modify_checksum(double %conv853) + %1373 = load i64, i64* @v_75, align 1 + %conv854 = sitofp i64 %1373 to double + call void @modify_checksum(double %conv854) + %1374 = load i64, i64* @v_76, align 1 + %conv855 = sitofp i64 %1374 to double + call void @modify_checksum(double %conv855) + %1375 = load i64, i64* @v_89, align 1 + %conv856 = uitofp i64 %1375 to double + call void @modify_checksum(double %conv856) + %1376 = load i64, i64* @v_92, align 1 + %conv857 = sitofp i64 %1376 to double + call void @modify_checksum(double %conv857) + %1377 = load i64, i64* @v_95, align 1 + %conv858 = sitofp i64 %1377 to double + call void @modify_checksum(double %conv858) + %1378 = load i64, i64* @v_97, align 1 + %conv859 = sitofp i64 %1378 to double + call void @modify_checksum(double %conv859) + %1379 = bitcast i16* %v_loop_0 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1379) #1 + store i16 0, i16* %v_loop_0, align 1 + %v_loop_0.promoted = load i16, i16* %v_loop_0, align 1 + br label %for.body864 + +for.body864: ; preds = %for.inc868, %cond.end842 + %inc8699799 = phi i16 [ %v_loop_0.promoted, %cond.end842 ], [ %inc869, %for.inc868 ] + %idxprom865 = sext i16 %inc8699799 to i32 + %arrayidx866.idx = mul i16 %inc8699799, 4 + %arrayidx866 = getelementptr [40 x i64], [40 x i64]* @v_98, i32 0, i32 %idxprom865 + %1380 = sub i16 160, %arrayidx866.idx + %1381 = icmp ult i16 160, %arrayidx866.idx + %1382 = icmp ult i16 %1380, 4 + %1383 = or i1 %1381, %1382 + br i1 %1383, label %trap.loopexit68, label %for.inc868 + +for.inc868: ; preds = %for.body864 + %1384 = load i64, i64* %arrayidx866, align 1 + %conv867 = sitofp i64 %1384 to double + call void @modify_checksum(double %conv867) + %inc869 = add i16 %inc8699799, 1 + %cmp861 = icmp slt i16 
%inc869, 40 + br i1 %cmp861, label %for.body864, label %for.end870 + +for.end870: ; preds = %for.inc868 + %inc86997.lcssa = phi i16 [ %inc869, %for.inc868 ] + store i16 %inc86997.lcssa, i16* %v_loop_0, align 1 + store i32 29, i32* %cleanup.dest.slot, align 1 + %1385 = bitcast i16* %v_loop_0 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1385) #1 + %1386 = load i64, i64* @v_99, align 1 + %conv871 = sitofp i64 %1386 to double + call void @modify_checksum(double %conv871) + %1387 = bitcast i16* %v_loop_0872 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1387) #1 + store i16 0, i16* %v_loop_0872, align 1 + %v_loop_0872.promoted = load i16, i16* %v_loop_0872, align 1 + br label %for.body877 + +for.body877: ; preds = %for.inc881, %for.end870 + %inc882100102 = phi i16 [ %v_loop_0872.promoted, %for.end870 ], [ %inc882, %for.inc881 ] + %idxprom878 = sext i16 %inc882100102 to i32 + %arrayidx879.idx = mul i16 %inc882100102, 4 + %arrayidx879 = getelementptr [40 x i64], [40 x i64]* @v_100, i32 0, i32 %idxprom878 + %1388 = sub i16 160, %arrayidx879.idx + %1389 = icmp ult i16 160, %arrayidx879.idx + %1390 = icmp ult i16 %1388, 4 + %1391 = or i1 %1389, %1390 + br i1 %1391, label %trap.loopexit67, label %for.inc881 + +for.inc881: ; preds = %for.body877 + %1392 = load i64, i64* %arrayidx879, align 1 + %conv880 = sitofp i64 %1392 to double + call void @modify_checksum(double %conv880) + %inc882 = add i16 %inc882100102, 1 + %cmp874 = icmp slt i16 %inc882, 40 + br i1 %cmp874, label %for.body877, label %for.end883 + +for.end883: ; preds = %for.inc881 + %inc882100.lcssa = phi i16 [ %inc882, %for.inc881 ] + store i16 %inc882100.lcssa, i16* %v_loop_0872, align 1 + store i32 32, i32* %cleanup.dest.slot, align 1 + %1393 = bitcast i16* %v_loop_0872 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1393) #1 + %1394 = load i64, i64* @v_101, align 1 + %conv884 = sitofp i64 %1394 to double + call void @modify_checksum(double %conv884) + %1395 = bitcast i16* %v_loop_0885 to 
i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1395) #1 + store i16 0, i16* %v_loop_0885, align 1 + %v_loop_0885.promoted = load i16, i16* %v_loop_0885, align 1 + br label %for.body890 + +for.body890: ; preds = %for.inc894, %for.end883 + %inc895103105 = phi i16 [ %v_loop_0885.promoted, %for.end883 ], [ %inc895, %for.inc894 ] + %idxprom891 = sext i16 %inc895103105 to i32 + %arrayidx892.idx = mul i16 %inc895103105, 4 + %arrayidx892 = getelementptr [40 x i64], [40 x i64]* @v_102, i32 0, i32 %idxprom891 + %1396 = sub i16 160, %arrayidx892.idx + %1397 = icmp ult i16 160, %arrayidx892.idx + %1398 = icmp ult i16 %1396, 4 + %1399 = or i1 %1397, %1398 + br i1 %1399, label %trap.loopexit66, label %for.inc894 + +for.inc894: ; preds = %for.body890 + %1400 = load i64, i64* %arrayidx892, align 1 + %conv893 = sitofp i64 %1400 to double + call void @modify_checksum(double %conv893) + %inc895 = add i16 %inc895103105, 1 + %cmp887 = icmp slt i16 %inc895, 40 + br i1 %cmp887, label %for.body890, label %for.end896 + +for.end896: ; preds = %for.inc894 + %inc895103.lcssa = phi i16 [ %inc895, %for.inc894 ] + store i16 %inc895103.lcssa, i16* %v_loop_0885, align 1 + store i32 35, i32* %cleanup.dest.slot, align 1 + %1401 = bitcast i16* %v_loop_0885 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1401) #1 + %1402 = load i32, i32* @v_104, align 1 + %1403 = sitofp i32 %1402 to double + %1404 = fmul double %1403, 0x3E00000000000000 + call void @modify_checksum(double %1404) + %1405 = load i16, i16* @v_111, align 1 + %1406 = sitofp i16 %1405 to double + %1407 = fmul double %1406, 0x3F00000000000000 + call void @modify_checksum(double %1407) + %1408 = load i32, i32* @v_118, align 1 + %1409 = sitofp i32 %1408 to double + %1410 = fmul double %1409, 0x3E00000000000000 + call void @modify_checksum(double %1410) + %1411 = bitcast i16* %v_loop_0897 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1411) #1 + store i16 0, i16* %v_loop_0897, align 1 + %v_loop_0897.promoted = load i16, 
i16* %v_loop_0897, align 1 + br label %for.body902 + +for.body902: ; preds = %for.inc905, %for.end896 + %inc906106108 = phi i16 [ %v_loop_0897.promoted, %for.end896 ], [ %inc906, %for.inc905 ] + %idxprom903 = sext i16 %inc906106108 to i32 + %arrayidx904.idx = mul i32 %idxprom903, 2 + %arrayidx904 = getelementptr [6 x i32], [6 x i32]* @v_119, i32 0, i32 %idxprom903 + %1412 = sub i32 12, %arrayidx904.idx + %1413 = icmp ult i32 12, %arrayidx904.idx + %1414 = icmp ult i32 %1412, 2 + %1415 = or i1 %1413, %1414 + br i1 %1415, label %trap.loopexit65, label %for.inc905 + +for.inc905: ; preds = %for.body902 + %1416 = load i32, i32* %arrayidx904, align 1 + %1417 = sitofp i32 %1416 to double + %1418 = fmul double %1417, 0x3E00000000000000 + call void @modify_checksum(double %1418) + %inc906 = add i16 %inc906106108, 1 + %cmp899 = icmp slt i16 %inc906, 6 + br i1 %cmp899, label %for.body902, label %for.end907 + +for.end907: ; preds = %for.inc905 + %inc906106.lcssa = phi i16 [ %inc906, %for.inc905 ] + store i16 %inc906106.lcssa, i16* %v_loop_0897, align 1 + store i32 38, i32* %cleanup.dest.slot, align 1 + %1419 = bitcast i16* %v_loop_0897 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1419) #1 + %1420 = load i32, i32* @v_120, align 1 + %1421 = sitofp i32 %1420 to double + %1422 = fmul double %1421, 0x3E00000000000000 + call void @modify_checksum(double %1422) + %1423 = load volatile i24, i24* @v_122, align 1 + %1424 = uitofp i24 %1423 to double + %1425 = fmul double %1424, 0x3F00000000000000 + call void @modify_checksum(double %1425) + %1426 = load i24, i24* @v_125, align 1 + %1427 = uitofp i24 %1426 to double + %1428 = fmul double %1427, 0x3F00000000000000 + call void @modify_checksum(double %1428) + %1429 = load i64, i64* @v_126, align 1 + %conv908 = sitofp i64 %1429 to double + call void @modify_checksum(double %conv908) + %1430 = load i24, i24* @v_128, align 1 + %1431 = uitofp i24 %1430 to double + %1432 = fmul double %1431, 0x3F00000000000000 + call void 
@modify_checksum(double %1432) + %1433 = load volatile double, double* @v_131, align 1 + call void @modify_checksum(double %1433) + %1434 = load volatile double, double* @v_133, align 1 + call void @modify_checksum(double %1434) + %1435 = load volatile double, double* @v_134, align 1 + call void @modify_checksum(double %1435) + %1436 = load i24, i24* @v_137, align 1 + %1437 = uitofp i24 %1436 to double + %1438 = fmul double %1437, 0x3F00000000000000 + call void @modify_checksum(double %1438) + %1439 = load volatile i16, i16* @v_139, align 1 + %conv909 = uitofp i16 %1439 to double + call void @modify_checksum(double %conv909) + %1440 = load volatile i16, i16* @v_142, align 1 + %conv910 = uitofp i16 %1440 to double + call void @modify_checksum(double %conv910) + %1441 = load i40, i40* @v_156, align 1 + %1442 = sitofp i40 %1441 to double + %1443 = fmul double %1442, 0x3E00000000000000 + call void @modify_checksum(double %1443) + %1444 = load volatile i24, i24* @v_161, align 1 + %1445 = uitofp i24 %1444 to double + %1446 = fmul double %1445, 0x3F00000000000000 + call void @modify_checksum(double %1446) + %1447 = load volatile i24, i24* @v_163, align 1 + %1448 = uitofp i24 %1447 to double + %1449 = fmul double %1448, 0x3F00000000000000 + call void @modify_checksum(double %1449) + %1450 = load volatile i16, i16* @v_168, align 1 + %conv911 = sitofp i16 %1450 to double + call void @modify_checksum(double %conv911) + %1451 = load i24, i24* @v_181, align 1 + %1452 = uitofp i24 %1451 to double + %1453 = fmul double %1452, 0x3F00000000000000 + call void @modify_checksum(double %1453) + %1454 = load volatile i16, i16* @v_183, align 1 + %conv912 = uitofp i16 %1454 to double + call void @modify_checksum(double %conv912) + %1455 = load i24, i24* @v_198, align 1 + %1456 = uitofp i24 %1455 to double + %1457 = fmul double %1456, 0x3F00000000000000 + call void @modify_checksum(double %1457) + %1458 = bitcast i16* %v_loop_0913 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* 
%1458) #1 + store i16 0, i16* %v_loop_0913, align 1 + %v_loop_0913.promoted = load i16, i16* %v_loop_0913, align 1 + br label %for.body918 + +for.body918: ; preds = %for.inc921, %for.end907 + %inc922109111 = phi i16 [ %v_loop_0913.promoted, %for.end907 ], [ %inc922, %for.inc921 ] + %idxprom919 = sext i16 %inc922109111 to i32 + %arrayidx920.idx = mul i16 %inc922109111, 2 + %arrayidx920 = getelementptr [8 x i24], [8 x i24]* @v_199, i32 0, i32 %idxprom919 + %1459 = sub i16 16, %arrayidx920.idx + %1460 = icmp ult i16 16, %arrayidx920.idx + %1461 = icmp ult i16 %1459, 2 + %1462 = or i1 %1460, %1461 + br i1 %1462, label %trap.loopexit64, label %for.inc921 + +for.inc921: ; preds = %for.body918 + %1463 = load i24, i24* %arrayidx920, align 1 + %1464 = uitofp i24 %1463 to double + %1465 = fmul double %1464, 0x3F00000000000000 + call void @modify_checksum(double %1465) + %inc922 = add i16 %inc922109111, 1 + %cmp915 = icmp slt i16 %inc922, 8 + br i1 %cmp915, label %for.body918, label %for.end923 + +for.end923: ; preds = %for.inc921 + %inc922109.lcssa = phi i16 [ %inc922, %for.inc921 ] + store i16 %inc922109.lcssa, i16* %v_loop_0913, align 1 + store i32 41, i32* %cleanup.dest.slot, align 1 + %1466 = bitcast i16* %v_loop_0913 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1466) #1 + %1467 = load i64, i64* @v_200, align 1 + %conv924 = sitofp i64 %1467 to double + call void @modify_checksum(double %conv924) + %1468 = load i32, i32* @v_203, align 1 + %1469 = sitofp i32 %1468 to double + %1470 = fmul double %1469, 0x3E00000000000000 + call void @modify_checksum(double %1470) + %1471 = load volatile i16, i16* @v_205, align 1 + %conv925 = sitofp i16 %1471 to double + call void @modify_checksum(double %conv925) + %1472 = load volatile i16, i16* @v_206, align 1 + %conv926 = sitofp i16 %1472 to double + call void @modify_checksum(double %conv926) + %1473 = load double, double* @v_207, align 1 + call void @modify_checksum(double %1473) + %1474 = load volatile i16, i16* @v_208, 
align 1 + %conv927 = uitofp i16 %1474 to double + call void @modify_checksum(double %conv927) + %1475 = load float, float* @v_214, align 1 + %conv928 = fpext float %1475 to double + call void @modify_checksum(double %conv928) + %1476 = load i32, i32* @v_215, align 1 + %conv929 = uitofp i32 %1476 to double + call void @modify_checksum(double %conv929) + %1477 = load i32, i32* @v_216, align 1 + %conv930 = uitofp i32 %1477 to double + call void @modify_checksum(double %conv930) + %1478 = load volatile i16, i16* @v_218, align 1 + %conv931 = sitofp i16 %1478 to double + call void @modify_checksum(double %conv931) + %1479 = load i64, i64* @v_222, align 1 + %conv932 = uitofp i64 %1479 to double + call void @modify_checksum(double %conv932) + %1480 = load i40, i40* @v_225, align 1 + %1481 = sitofp i40 %1480 to double + %1482 = fmul double %1481, 0x3E00000000000000 + call void @modify_checksum(double %1482) + %1483 = load i32, i32* @v_228, align 1 + %conv933 = uitofp i32 %1483 to double + call void @modify_checksum(double %conv933) + %1484 = load volatile i24, i24* @v_230, align 1 + %1485 = uitofp i24 %1484 to double + %1486 = fmul double %1485, 0x3F00000000000000 + call void @modify_checksum(double %1486) + %1487 = load volatile i16, i16* @v_239, align 1 + %conv934 = sitofp i16 %1487 to double + call void @modify_checksum(double %conv934) + %1488 = load i32, i32* @v_240, align 1 + %1489 = sitofp i32 %1488 to double + %1490 = fmul double %1489, 0x3E00000000000000 + call void @modify_checksum(double %1490) + %1491 = load i32, i32* @v_242, align 1 + %1492 = sitofp i32 %1491 to double + %1493 = fmul double %1492, 0x3E00000000000000 + call void @modify_checksum(double %1493) + %1494 = load i24, i24* @v_243, align 1 + %1495 = sitofp i24 %1494 to double + %1496 = fmul double %1495, 0x3F00000000000000 + call void @modify_checksum(double %1496) + %1497 = load i24, i24* @v_244, align 1 + %1498 = sitofp i24 %1497 to double + %1499 = fmul double %1498, 0x3F00000000000000 + call void 
@modify_checksum(double %1499) + %1500 = load i32, i32* @v_245, align 1 + %conv935 = sitofp i32 %1500 to double + call void @modify_checksum(double %conv935) + %1501 = load float, float* @v_246, align 1 + %conv936 = fpext float %1501 to double + call void @modify_checksum(double %conv936) + %1502 = load volatile i16, i16* @v_247, align 1 + %conv937 = sitofp i16 %1502 to double + call void @modify_checksum(double %conv937) + %1503 = load float, float* @v_248, align 1 + %conv938 = fpext float %1503 to double + call void @modify_checksum(double %conv938) + %1504 = load float, float* @v_252, align 1 + %conv939 = fpext float %1504 to double + call void @modify_checksum(double %conv939) + %1505 = bitcast i16* %v_loop_0940 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1505) #1 + store i16 0, i16* %v_loop_0940, align 1 + %v_loop_0940.promoted = load i16, i16* %v_loop_0940, align 1 + br label %for.body945 + +for.body945: ; preds = %for.inc949, %for.end923 + %inc950112114 = phi i16 [ %v_loop_0940.promoted, %for.end923 ], [ %inc950, %for.inc949 ] + %idxprom946 = sext i16 %inc950112114 to i32 + %arrayidx947.idx = mul i32 %idxprom946, 2 + %arrayidx947 = getelementptr [3 x float], [3 x float]* @v_253, i32 0, i32 %idxprom946 + %1506 = sub i32 6, %arrayidx947.idx + %1507 = icmp ult i32 6, %arrayidx947.idx + %1508 = icmp ult i32 %1506, 2 + %1509 = or i1 %1507, %1508 + br i1 %1509, label %trap.loopexit63, label %for.inc949 + +for.inc949: ; preds = %for.body945 + %1510 = load float, float* %arrayidx947, align 1 + %conv948 = fpext float %1510 to double + call void @modify_checksum(double %conv948) + %inc950 = add i16 %inc950112114, 1 + %cmp942 = icmp slt i16 %inc950, 3 + br i1 %cmp942, label %for.body945, label %for.end951 + +for.end951: ; preds = %for.inc949 + %inc950112.lcssa = phi i16 [ %inc950, %for.inc949 ] + store i16 %inc950112.lcssa, i16* %v_loop_0940, align 1 + store i32 44, i32* %cleanup.dest.slot, align 1 + %1511 = bitcast i16* %v_loop_0940 to i8* + call void 
@llvm.lifetime.end.p0i8(i64 1, i8* %1511) #1 + %1512 = load i64, i64* @v_255, align 1 + %conv952 = uitofp i64 %1512 to double + call void @modify_checksum(double %conv952) + %1513 = load float, float* @v_258, align 1 + %conv953 = fpext float %1513 to double + call void @modify_checksum(double %conv953) + %1514 = load float, float* @v_263, align 1 + %conv954 = fpext float %1514 to double + call void @modify_checksum(double %conv954) + %1515 = load float, float* @v_264, align 1 + %conv955 = fpext float %1515 to double + call void @modify_checksum(double %conv955) + %1516 = load volatile i24, i24* @v_266, align 1 + %1517 = sitofp i24 %1516 to double + %1518 = fmul double %1517, 0x3F00000000000000 + call void @modify_checksum(double %1518) + %1519 = load i64, i64* @v_268, align 1 + %conv956 = uitofp i64 %1519 to double + call void @modify_checksum(double %conv956) + %1520 = load i16, i16* @v_269, align 1 + %conv957 = sitofp i16 %1520 to double + call void @modify_checksum(double %conv957) + call void @modify_checksum(double 4.094000e+03) + %1521 = load volatile i16, i16* @v_278, align 1 + %conv958 = uitofp i16 %1521 to double + call void @modify_checksum(double %conv958) + %1522 = load volatile i16, i16* @v_290, align 1 + %conv959 = uitofp i16 %1522 to double + call void @modify_checksum(double %conv959) + %1523 = load volatile i16, i16* @v_292, align 1 + %conv960 = uitofp i16 %1523 to double + call void @modify_checksum(double %conv960) + %1524 = load i64, i64* @v_294, align 1 + %conv961 = sitofp i64 %1524 to double + call void @modify_checksum(double %conv961) + %1525 = load i64, i64* @v_296, align 1 + %conv962 = sitofp i64 %1525 to double + call void @modify_checksum(double %conv962) + %1526 = load i64, i64* @v_297, align 1 + %conv963 = sitofp i64 %1526 to double + call void @modify_checksum(double %conv963) + %1527 = load i64, i64* @v_310, align 1 + %conv964 = uitofp i64 %1527 to double + call void @modify_checksum(double %conv964) + %1528 = load i64, i64* @v_313, 
align 1 + %conv965 = sitofp i64 %1528 to double + call void @modify_checksum(double %conv965) + %1529 = load i64, i64* @v_316, align 1 + %conv966 = sitofp i64 %1529 to double + call void @modify_checksum(double %conv966) + %1530 = load i64, i64* @v_318, align 1 + %conv967 = sitofp i64 %1530 to double + call void @modify_checksum(double %conv967) + %1531 = bitcast i16* %v_loop_0968 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1531) #1 + store i16 0, i16* %v_loop_0968, align 1 + %v_loop_0968.promoted = load i16, i16* %v_loop_0968, align 1 + br label %for.body973 + +for.body973: ; preds = %for.inc977, %for.end951 + %inc978115117 = phi i16 [ %v_loop_0968.promoted, %for.end951 ], [ %inc978, %for.inc977 ] + %idxprom974 = sext i16 %inc978115117 to i32 + %arrayidx975.idx = mul i16 %inc978115117, 4 + %arrayidx975 = getelementptr [40 x i64], [40 x i64]* @v_319, i32 0, i32 %idxprom974 + %1532 = sub i16 160, %arrayidx975.idx + %1533 = icmp ult i16 160, %arrayidx975.idx + %1534 = icmp ult i16 %1532, 4 + %1535 = or i1 %1533, %1534 + br i1 %1535, label %trap.loopexit62, label %for.inc977 + +for.inc977: ; preds = %for.body973 + %1536 = load i64, i64* %arrayidx975, align 1 + %conv976 = sitofp i64 %1536 to double + call void @modify_checksum(double %conv976) + %inc978 = add i16 %inc978115117, 1 + %cmp970 = icmp slt i16 %inc978, 40 + br i1 %cmp970, label %for.body973, label %for.end979 + +for.end979: ; preds = %for.inc977 + %inc978115.lcssa = phi i16 [ %inc978, %for.inc977 ] + store i16 %inc978115.lcssa, i16* %v_loop_0968, align 1 + store i32 47, i32* %cleanup.dest.slot, align 1 + %1537 = bitcast i16* %v_loop_0968 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1537) #1 + %1538 = load i64, i64* @v_320, align 1 + %conv980 = sitofp i64 %1538 to double + call void @modify_checksum(double %conv980) + %1539 = bitcast i16* %v_loop_0981 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1539) #1 + store i16 0, i16* %v_loop_0981, align 1 + %v_loop_0981.promoted = 
load i16, i16* %v_loop_0981, align 1 + br label %for.body986 + +for.body986: ; preds = %for.inc990, %for.end979 + %inc991118120 = phi i16 [ %v_loop_0981.promoted, %for.end979 ], [ %inc991, %for.inc990 ] + %idxprom987 = sext i16 %inc991118120 to i32 + %arrayidx988.idx = mul i16 %inc991118120, 4 + %arrayidx988 = getelementptr [40 x i64], [40 x i64]* @v_321, i32 0, i32 %idxprom987 + %1540 = sub i16 160, %arrayidx988.idx + %1541 = icmp ult i16 160, %arrayidx988.idx + %1542 = icmp ult i16 %1540, 4 + %1543 = or i1 %1541, %1542 + br i1 %1543, label %trap.loopexit61, label %for.inc990 + +for.inc990: ; preds = %for.body986 + %1544 = load i64, i64* %arrayidx988, align 1 + %conv989 = sitofp i64 %1544 to double + call void @modify_checksum(double %conv989) + %inc991 = add i16 %inc991118120, 1 + %cmp983 = icmp slt i16 %inc991, 40 + br i1 %cmp983, label %for.body986, label %for.end992 + +for.end992: ; preds = %for.inc990 + %inc991118.lcssa = phi i16 [ %inc991, %for.inc990 ] + store i16 %inc991118.lcssa, i16* %v_loop_0981, align 1 + store i32 50, i32* %cleanup.dest.slot, align 1 + %1545 = bitcast i16* %v_loop_0981 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1545) #1 + %1546 = load i64, i64* @v_322, align 1 + %conv993 = sitofp i64 %1546 to double + call void @modify_checksum(double %conv993) + %1547 = bitcast i16* %v_loop_0994 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1547) #1 + store i16 0, i16* %v_loop_0994, align 1 + %v_loop_0994.promoted = load i16, i16* %v_loop_0994, align 1 + br label %for.body999 + +for.body999: ; preds = %for.inc1003, %for.end992 + %inc1004121123 = phi i16 [ %v_loop_0994.promoted, %for.end992 ], [ %inc1004, %for.inc1003 ] + %idxprom1000 = sext i16 %inc1004121123 to i32 + %arrayidx1001.idx = mul i16 %inc1004121123, 4 + %arrayidx1001 = getelementptr [40 x i64], [40 x i64]* @v_323, i32 0, i32 %idxprom1000 + %1548 = sub i16 160, %arrayidx1001.idx + %1549 = icmp ult i16 160, %arrayidx1001.idx + %1550 = icmp ult i16 %1548, 4 + %1551 = 
or i1 %1549, %1550 + br i1 %1551, label %trap.loopexit60, label %for.inc1003 + +for.inc1003: ; preds = %for.body999 + %1552 = load i64, i64* %arrayidx1001, align 1 + %conv1002 = sitofp i64 %1552 to double + call void @modify_checksum(double %conv1002) + %inc1004 = add i16 %inc1004121123, 1 + %cmp996 = icmp slt i16 %inc1004, 40 + br i1 %cmp996, label %for.body999, label %for.end1005 + +for.end1005: ; preds = %for.inc1003 + %inc1004121.lcssa = phi i16 [ %inc1004, %for.inc1003 ] + store i16 %inc1004121.lcssa, i16* %v_loop_0994, align 1 + store i32 53, i32* %cleanup.dest.slot, align 1 + %1553 = bitcast i16* %v_loop_0994 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1553) #1 + %1554 = load i64, i64* @v_324, align 1 + %conv1006 = sitofp i64 %1554 to double + call void @modify_checksum(double %conv1006) + %1555 = bitcast i16* %v_loop_01007 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1555) #1 + store i16 0, i16* %v_loop_01007, align 1 + %v_loop_01007.promoted = load i16, i16* %v_loop_01007, align 1 + br label %for.body1012 + +for.body1012: ; preds = %for.inc1016, %for.end1005 + %inc1017124126 = phi i16 [ %v_loop_01007.promoted, %for.end1005 ], [ %inc1017, %for.inc1016 ] + %idxprom1013 = sext i16 %inc1017124126 to i32 + %arrayidx1014.idx = mul i16 %inc1017124126, 4 + %arrayidx1014 = getelementptr [40 x i64], [40 x i64]* @v_325, i32 0, i32 %idxprom1013 + %1556 = sub i16 160, %arrayidx1014.idx + %1557 = icmp ult i16 160, %arrayidx1014.idx + %1558 = icmp ult i16 %1556, 4 + %1559 = or i1 %1557, %1558 + br i1 %1559, label %trap.loopexit59, label %for.inc1016 + +for.inc1016: ; preds = %for.body1012 + %1560 = load i64, i64* %arrayidx1014, align 1 + %conv1015 = sitofp i64 %1560 to double + call void @modify_checksum(double %conv1015) + %inc1017 = add i16 %inc1017124126, 1 + %cmp1009 = icmp slt i16 %inc1017, 40 + br i1 %cmp1009, label %for.body1012, label %for.end1018 + +for.end1018: ; preds = %for.inc1016 + %inc1017124.lcssa = phi i16 [ %inc1017, %for.inc1016 
] + store i16 %inc1017124.lcssa, i16* %v_loop_01007, align 1 + store i32 56, i32* %cleanup.dest.slot, align 1 + %1561 = bitcast i16* %v_loop_01007 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1561) #1 + %1562 = load i16, i16* @v_335, align 1 + %1563 = sitofp i16 %1562 to double + %1564 = fmul double %1563, 0x3F00000000000000 + call void @modify_checksum(double %1564) + %1565 = load i32, i32* @v_342, align 1 + %1566 = sitofp i32 %1565 to double + %1567 = fmul double %1566, 0x3E00000000000000 + call void @modify_checksum(double %1567) + %1568 = bitcast i16* %v_loop_01019 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1568) #1 + store i16 0, i16* %v_loop_01019, align 1 + %v_loop_01019.promoted = load i16, i16* %v_loop_01019, align 1 + br label %for.body1024 + +for.body1024: ; preds = %for.inc1027, %for.end1018 + %inc1028127129 = phi i16 [ %v_loop_01019.promoted, %for.end1018 ], [ %inc1028, %for.inc1027 ] + %idxprom1025 = sext i16 %inc1028127129 to i32 + %arrayidx1026.idx = mul i32 %idxprom1025, 2 + %arrayidx1026 = getelementptr [6 x i32], [6 x i32]* @v_343, i32 0, i32 %idxprom1025 + %1569 = sub i32 12, %arrayidx1026.idx + %1570 = icmp ult i32 12, %arrayidx1026.idx + %1571 = icmp ult i32 %1569, 2 + %1572 = or i1 %1570, %1571 + br i1 %1572, label %trap.loopexit58, label %for.inc1027 + +for.inc1027: ; preds = %for.body1024 + %1573 = load i32, i32* %arrayidx1026, align 1 + %1574 = sitofp i32 %1573 to double + %1575 = fmul double %1574, 0x3E00000000000000 + call void @modify_checksum(double %1575) + %inc1028 = add i16 %inc1028127129, 1 + %cmp1021 = icmp slt i16 %inc1028, 6 + br i1 %cmp1021, label %for.body1024, label %for.end1029 + +for.end1029: ; preds = %for.inc1027 + %inc1028127.lcssa = phi i16 [ %inc1028, %for.inc1027 ] + store i16 %inc1028127.lcssa, i16* %v_loop_01019, align 1 + store i32 59, i32* %cleanup.dest.slot, align 1 + %1576 = bitcast i16* %v_loop_01019 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1576) #1 + %1577 = load i32, i32* 
@v_344, align 1 + %1578 = sitofp i32 %1577 to double + %1579 = fmul double %1578, 0x3E00000000000000 + call void @modify_checksum(double %1579) + %1580 = load volatile i24, i24* @v_346, align 1 + %1581 = uitofp i24 %1580 to double + %1582 = fmul double %1581, 0x3F00000000000000 + call void @modify_checksum(double %1582) + %1583 = load i24, i24* @v_349, align 1 + %1584 = uitofp i24 %1583 to double + %1585 = fmul double %1584, 0x3F00000000000000 + call void @modify_checksum(double %1585) + %1586 = load i64, i64* @v_350, align 1 + %conv1030 = sitofp i64 %1586 to double + call void @modify_checksum(double %conv1030) + %1587 = load i24, i24* @v_352, align 1 + %1588 = uitofp i24 %1587 to double + %1589 = fmul double %1588, 0x3F00000000000000 + call void @modify_checksum(double %1589) + %1590 = load volatile double, double* @v_355, align 1 + call void @modify_checksum(double %1590) + %1591 = load volatile double, double* @v_357, align 1 + call void @modify_checksum(double %1591) + %1592 = load volatile double, double* @v_358, align 1 + call void @modify_checksum(double %1592) + %1593 = load i24, i24* @v_361, align 1 + %1594 = uitofp i24 %1593 to double + %1595 = fmul double %1594, 0x3F00000000000000 + call void @modify_checksum(double %1595) + %1596 = load volatile i16, i16* @v_363, align 1 + %conv1031 = uitofp i16 %1596 to double + call void @modify_checksum(double %conv1031) + %1597 = load volatile i16, i16* @v_366, align 1 + %conv1032 = uitofp i16 %1597 to double + call void @modify_checksum(double %conv1032) + %1598 = load i40, i40* @v_380, align 1 + %1599 = sitofp i40 %1598 to double + %1600 = fmul double %1599, 0x3E00000000000000 + call void @modify_checksum(double %1600) + %1601 = load volatile i24, i24* @v_385, align 1 + %1602 = uitofp i24 %1601 to double + %1603 = fmul double %1602, 0x3F00000000000000 + call void @modify_checksum(double %1603) + %1604 = load volatile i24, i24* @v_387, align 1 + %1605 = uitofp i24 %1604 to double + %1606 = fmul double %1605, 
0x3F00000000000000 + call void @modify_checksum(double %1606) + %1607 = load volatile i16, i16* @v_392, align 1 + %conv1033 = sitofp i16 %1607 to double + call void @modify_checksum(double %conv1033) + %1608 = load i24, i24* @v_403, align 1 + %1609 = uitofp i24 %1608 to double + %1610 = fmul double %1609, 0x3F00000000000000 + call void @modify_checksum(double %1610) + %1611 = load volatile i16, i16* @v_405, align 1 + %conv1034 = uitofp i16 %1611 to double + call void @modify_checksum(double %conv1034) + %1612 = load i24, i24* @v_420, align 1 + %1613 = uitofp i24 %1612 to double + %1614 = fmul double %1613, 0x3F00000000000000 + call void @modify_checksum(double %1614) + %1615 = bitcast i16* %v_loop_01035 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1615) #1 + store i16 0, i16* %v_loop_01035, align 1 + %v_loop_01035.promoted = load i16, i16* %v_loop_01035, align 1 + br label %for.body1040 + +for.body1040: ; preds = %for.inc1043, %for.end1029 + %inc1044130132 = phi i16 [ %v_loop_01035.promoted, %for.end1029 ], [ %inc1044, %for.inc1043 ] + %idxprom1041 = sext i16 %inc1044130132 to i32 + %arrayidx1042.idx = mul i16 %inc1044130132, 2 + %arrayidx1042 = getelementptr [8 x i24], [8 x i24]* @v_421, i32 0, i32 %idxprom1041 + %1616 = sub i16 16, %arrayidx1042.idx + %1617 = icmp ult i16 16, %arrayidx1042.idx + %1618 = icmp ult i16 %1616, 2 + %1619 = or i1 %1617, %1618 + br i1 %1619, label %trap.loopexit57, label %for.inc1043 + +for.inc1043: ; preds = %for.body1040 + %1620 = load i24, i24* %arrayidx1042, align 1 + %1621 = uitofp i24 %1620 to double + %1622 = fmul double %1621, 0x3F00000000000000 + call void @modify_checksum(double %1622) + %inc1044 = add i16 %inc1044130132, 1 + %cmp1037 = icmp slt i16 %inc1044, 8 + br i1 %cmp1037, label %for.body1040, label %for.end1045 + +for.end1045: ; preds = %for.inc1043 + %inc1044130.lcssa = phi i16 [ %inc1044, %for.inc1043 ] + store i16 %inc1044130.lcssa, i16* %v_loop_01035, align 1 + store i32 62, i32* %cleanup.dest.slot, 
align 1 + %1623 = bitcast i16* %v_loop_01035 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1623) #1 + %1624 = load i64, i64* @v_422, align 1 + %conv1046 = sitofp i64 %1624 to double + call void @modify_checksum(double %conv1046) + %1625 = load i32, i32* @v_425, align 1 + %1626 = sitofp i32 %1625 to double + %1627 = fmul double %1626, 0x3E00000000000000 + call void @modify_checksum(double %1627) + %1628 = load volatile i16, i16* @v_427, align 1 + %conv1047 = sitofp i16 %1628 to double + call void @modify_checksum(double %conv1047) + %1629 = load i64, i64* @v_428, align 1 + %conv1048 = uitofp i64 %1629 to double + call void @modify_checksum(double %conv1048) + %1630 = load i24, i24* @v_429, align 1 + %1631 = sitofp i24 %1630 to double + %1632 = fmul double %1631, 0x3F00000000000000 + call void @modify_checksum(double %1632) + %1633 = load i40, i40* @v_430, align 1 + %1634 = sitofp i40 %1633 to double + %1635 = fmul double %1634, 0x3E00000000000000 + call void @modify_checksum(double %1635) + %1636 = load i40, i40* @v_431, align 1 + %1637 = sitofp i40 %1636 to double + %1638 = fmul double %1637, 0x3E00000000000000 + call void @modify_checksum(double %1638) + %1639 = load double, double* @v_432, align 1 + call void @modify_checksum(double %1639) + %1640 = load i40, i40* @v_434, align 1 + %1641 = sitofp i40 %1640 to double + %1642 = fmul double %1641, 0x3E00000000000000 + call void @modify_checksum(double %1642) + %1643 = load i24, i24* @v_436, align 1 + %1644 = sitofp i24 %1643 to double + %1645 = fmul double %1644, 0x3F00000000000000 + call void @modify_checksum(double %1645) + %1646 = bitcast i16* %v_loop_01049 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1646) #1 + store i16 0, i16* %v_loop_01049, align 1 + %v_loop_01049.promoted = load i16, i16* %v_loop_01049, align 1 + br label %for.body1054 + +for.body1054: ; preds = %for.inc1057, %for.end1045 + %inc1058133135 = phi i16 [ %v_loop_01049.promoted, %for.end1045 ], [ %inc1058, %for.inc1057 ] + 
%idxprom1055 = sext i16 %inc1058133135 to i32 + %arrayidx1056.idx = mul i16 %inc1058133135, 2 + %arrayidx1056 = getelementptr [32 x i24], [32 x i24]* @v_437, i32 0, i32 %idxprom1055 + %1647 = sub i16 64, %arrayidx1056.idx + %1648 = icmp ult i16 64, %arrayidx1056.idx + %1649 = icmp ult i16 %1647, 2 + %1650 = or i1 %1648, %1649 + br i1 %1650, label %trap.loopexit56, label %for.inc1057 + +for.inc1057: ; preds = %for.body1054 + %1651 = load i24, i24* %arrayidx1056, align 1 + %1652 = sitofp i24 %1651 to double + %1653 = fmul double %1652, 0x3F00000000000000 + call void @modify_checksum(double %1653) + %inc1058 = add i16 %inc1058133135, 1 + %cmp1051 = icmp slt i16 %inc1058, 32 + br i1 %cmp1051, label %for.body1054, label %for.end1059 + +for.end1059: ; preds = %for.inc1057 + %inc1058133.lcssa = phi i16 [ %inc1058, %for.inc1057 ] + store i16 %inc1058133.lcssa, i16* %v_loop_01049, align 1 + store i32 65, i32* %cleanup.dest.slot, align 1 + %1654 = bitcast i16* %v_loop_01049 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1654) #1 + %1655 = load float, float* @v_440, align 1 + %conv1060 = fpext float %1655 to double + call void @modify_checksum(double %conv1060) + %1656 = load i32, i32* @v_443, align 1 + %conv1061 = uitofp i32 %1656 to double + call void @modify_checksum(double %conv1061) + %1657 = bitcast i16* %v_loop_01062 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1657) #1 + store i16 0, i16* %v_loop_01062, align 1 + %v_loop_01062.promoted = load i16, i16* %v_loop_01062, align 1 + br label %for.body1067 + +for.body1067: ; preds = %for.inc1071, %for.end1059 + %inc1072136138 = phi i16 [ %v_loop_01062.promoted, %for.end1059 ], [ %inc1072, %for.inc1071 ] + %idxprom1068 = sext i16 %inc1072136138 to i32 + %arrayidx1069.idx = mul i32 %idxprom1068, 2 + %arrayidx1069 = getelementptr [2 x i32], [2 x i32]* @v_444, i32 0, i32 %idxprom1068 + %1658 = sub i32 4, %arrayidx1069.idx + %1659 = icmp ult i32 4, %arrayidx1069.idx + %1660 = icmp ult i32 %1658, 2 + %1661 = or 
i1 %1659, %1660 + br i1 %1661, label %trap.loopexit55, label %for.inc1071 + +for.inc1071: ; preds = %for.body1067 + %1662 = load i32, i32* %arrayidx1069, align 1 + %conv1070 = uitofp i32 %1662 to double + call void @modify_checksum(double %conv1070) + %inc1072 = add i16 %inc1072136138, 1 + %cmp1064 = icmp slt i16 %inc1072, 2 + br i1 %cmp1064, label %for.body1067, label %for.end1073 + +for.end1073: ; preds = %for.inc1071 + %inc1072136.lcssa = phi i16 [ %inc1072, %for.inc1071 ] + store i16 %inc1072136.lcssa, i16* %v_loop_01062, align 1 + store i32 68, i32* %cleanup.dest.slot, align 1 + %1663 = bitcast i16* %v_loop_01062 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1663) #1 + %1664 = bitcast i16* %v_loop_01074 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1664) #1 + store i16 0, i16* %v_loop_01074, align 1 + %1665 = bitcast i16* %v_loop_11080 to i8* + %1666 = bitcast i16* %v_loop_11080 to i8* + %v_loop_01074.promoted = load i16, i16* %v_loop_01074, align 1 + br label %for.body1079 + +for.body1079: ; preds = %for.inc1094, %for.end1073 + %inc1095139141 = phi i16 [ %v_loop_01074.promoted, %for.end1073 ], [ %inc1095, %for.inc1094 ] + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1665) #1 + store i16 0, i16* %v_loop_11080, align 1 + br label %for.body1085 + +for.body1085: ; preds = %for.inc1091, %for.body1079 + %idxprom1086 = sext i16 %inc1095139141 to i32 + %arrayidx1087.idx = mul i32 %idxprom1086, 4 + %arrayidx1087 = getelementptr [4 x [2 x i32]], [4 x [2 x i32]]* @v_445, i32 0, i32 %idxprom1086 + %1667 = load i16, i16* %v_loop_11080, align 1 + %idxprom1088 = sext i16 %1667 to i32 + %arrayidx1089.idx = mul i32 %idxprom1088, 2 + %1668 = add i32 %arrayidx1087.idx, %arrayidx1089.idx + %arrayidx1089 = getelementptr [2 x i32], [2 x i32]* %arrayidx1087, i32 0, i32 %idxprom1088 + %1669 = sub i32 16, %1668 + %1670 = icmp ult i32 16, %1668 + %1671 = icmp ult i32 %1669, 2 + %1672 = or i1 %1670, %1671 + br i1 %1672, label %trap.loopexit54, label %for.inc1091 
+ +for.inc1091: ; preds = %for.body1085 + %1673 = load i32, i32* %arrayidx1089, align 1 + %conv1090 = uitofp i32 %1673 to double + call void @modify_checksum(double %conv1090) + %1674 = load i16, i16* %v_loop_11080, align 1 + %inc1092 = add i16 %1674, 1 + store i16 %inc1092, i16* %v_loop_11080, align 1 + %cmp1082 = icmp slt i16 %inc1092, 2 + br i1 %cmp1082, label %for.body1085, label %for.inc1094 + +for.inc1094: ; preds = %for.inc1091 + store i32 74, i32* %cleanup.dest.slot, align 1 + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1666) #1 + %inc1095 = add i16 %inc1095139141, 1 + %cmp1076 = icmp slt i16 %inc1095, 4 + br i1 %cmp1076, label %for.body1079, label %for.end1096 + +for.end1096: ; preds = %for.inc1094 + %inc1095139.lcssa = phi i16 [ %inc1095, %for.inc1094 ] + store i16 %inc1095139.lcssa, i16* %v_loop_01074, align 1 + store i32 71, i32* %cleanup.dest.slot, align 1 + %1675 = bitcast i16* %v_loop_01074 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1675) #1 + %1676 = load double, double* @v_447, align 1 + call void @modify_checksum(double %1676) + %1677 = bitcast i16* %v_loop_01097 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1677) #1 + store i16 0, i16* %v_loop_01097, align 1 + %v_loop_01097.promoted = load i16, i16* %v_loop_01097, align 1 + br label %for.body1102 + +for.body1102: ; preds = %for.inc1105, %for.end1096 + %inc1106142144 = phi i16 [ %v_loop_01097.promoted, %for.end1096 ], [ %inc1106, %for.inc1105 ] + %idxprom1103 = sext i16 %inc1106142144 to i32 + %arrayidx1104.idx = mul i16 %inc1106142144, 4 + %arrayidx1104 = getelementptr [64 x double], [64 x double]* @v_448, i32 0, i32 %idxprom1103 + %1678 = sub i16 256, %arrayidx1104.idx + %1679 = icmp ult i16 256, %arrayidx1104.idx + %1680 = icmp ult i16 %1678, 4 + %1681 = or i1 %1679, %1680 + br i1 %1681, label %trap.loopexit53, label %for.inc1105 + +for.inc1105: ; preds = %for.body1102 + %1682 = load double, double* %arrayidx1104, align 1 + call void @modify_checksum(double %1682) + 
%inc1106 = add i16 %inc1106142144, 1 + %cmp1099 = icmp slt i16 %inc1106, 64 + br i1 %cmp1099, label %for.body1102, label %for.end1107 + +for.end1107: ; preds = %for.inc1105 + %inc1106142.lcssa = phi i16 [ %inc1106, %for.inc1105 ] + store i16 %inc1106142.lcssa, i16* %v_loop_01097, align 1 + store i32 77, i32* %cleanup.dest.slot, align 1 + %1683 = bitcast i16* %v_loop_01097 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1683) #1 + %1684 = load double, double* @v_449, align 1 + call void @modify_checksum(double %1684) + %1685 = bitcast i16* %v_loop_01108 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1685) #1 + store i16 0, i16* %v_loop_01108, align 1 + %v_loop_01108.promoted = load i16, i16* %v_loop_01108, align 1 + br label %for.body1113 + +for.body1113: ; preds = %for.inc1116, %for.end1107 + %inc1117145147 = phi i16 [ %v_loop_01108.promoted, %for.end1107 ], [ %inc1117, %for.inc1116 ] + %idxprom1114 = sext i16 %inc1117145147 to i32 + %arrayidx1115.idx = mul i16 %inc1117145147, 4 + %arrayidx1115 = getelementptr [64 x double], [64 x double]* @v_450, i32 0, i32 %idxprom1114 + %1686 = sub i16 256, %arrayidx1115.idx + %1687 = icmp ult i16 256, %arrayidx1115.idx + %1688 = icmp ult i16 %1686, 4 + %1689 = or i1 %1687, %1688 + br i1 %1689, label %trap.loopexit52, label %for.inc1116 + +for.inc1116: ; preds = %for.body1113 + %1690 = load double, double* %arrayidx1115, align 1 + call void @modify_checksum(double %1690) + %inc1117 = add i16 %inc1117145147, 1 + %cmp1110 = icmp slt i16 %inc1117, 64 + br i1 %cmp1110, label %for.body1113, label %for.end1118 + +for.end1118: ; preds = %for.inc1116 + %inc1117145.lcssa = phi i16 [ %inc1117, %for.inc1116 ] + store i16 %inc1117145.lcssa, i16* %v_loop_01108, align 1 + store i32 80, i32* %cleanup.dest.slot, align 1 + %1691 = bitcast i16* %v_loop_01108 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1691) #1 + %1692 = load double, double* @v_451, align 1 + call void @modify_checksum(double %1692) + %1693 = bitcast 
i16* %v_loop_01119 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1693) #1 + store i16 0, i16* %v_loop_01119, align 1 + %v_loop_01119.promoted = load i16, i16* %v_loop_01119, align 1 + br label %for.body1124 + +for.body1124: ; preds = %for.inc1127, %for.end1118 + %inc1128148150 = phi i16 [ %v_loop_01119.promoted, %for.end1118 ], [ %inc1128, %for.inc1127 ] + %idxprom1125 = sext i16 %inc1128148150 to i32 + %arrayidx1126.idx = mul i16 %inc1128148150, 4 + %arrayidx1126 = getelementptr [64 x double], [64 x double]* @v_452, i32 0, i32 %idxprom1125 + %1694 = sub i16 256, %arrayidx1126.idx + %1695 = icmp ult i16 256, %arrayidx1126.idx + %1696 = icmp ult i16 %1694, 4 + %1697 = or i1 %1695, %1696 + br i1 %1697, label %trap.loopexit51, label %for.inc1127 + +for.inc1127: ; preds = %for.body1124 + %1698 = load double, double* %arrayidx1126, align 1 + call void @modify_checksum(double %1698) + %inc1128 = add i16 %inc1128148150, 1 + %cmp1121 = icmp slt i16 %inc1128, 64 + br i1 %cmp1121, label %for.body1124, label %for.end1129 + +for.end1129: ; preds = %for.inc1127 + %inc1128148.lcssa = phi i16 [ %inc1128, %for.inc1127 ] + store i16 %inc1128148.lcssa, i16* %v_loop_01119, align 1 + store i32 83, i32* %cleanup.dest.slot, align 1 + %1699 = bitcast i16* %v_loop_01119 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1699) #1 + %1700 = load double, double* @v_453, align 1 + call void @modify_checksum(double %1700) + %1701 = bitcast i16* %v_loop_01130 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1701) #1 + store i16 0, i16* %v_loop_01130, align 1 + %v_loop_01130.promoted = load i16, i16* %v_loop_01130, align 1 + br label %for.body1135 + +for.body1135: ; preds = %for.inc1138, %for.end1129 + %inc1139151153 = phi i16 [ %v_loop_01130.promoted, %for.end1129 ], [ %inc1139, %for.inc1138 ] + %idxprom1136 = sext i16 %inc1139151153 to i32 + %arrayidx1137.idx = mul i16 %inc1139151153, 4 + %arrayidx1137 = getelementptr [64 x double], [64 x double]* @v_454, i32 0, i32 
%idxprom1136 + %1702 = sub i16 256, %arrayidx1137.idx + %1703 = icmp ult i16 256, %arrayidx1137.idx + %1704 = icmp ult i16 %1702, 4 + %1705 = or i1 %1703, %1704 + br i1 %1705, label %trap.loopexit50, label %for.inc1138 + +for.inc1138: ; preds = %for.body1135 + %1706 = load double, double* %arrayidx1137, align 1 + call void @modify_checksum(double %1706) + %inc1139 = add i16 %inc1139151153, 1 + %cmp1132 = icmp slt i16 %inc1139, 64 + br i1 %cmp1132, label %for.body1135, label %for.end1140 + +for.end1140: ; preds = %for.inc1138 + %inc1139151.lcssa = phi i16 [ %inc1139, %for.inc1138 ] + store i16 %inc1139151.lcssa, i16* %v_loop_01130, align 1 + store i32 86, i32* %cleanup.dest.slot, align 1 + %1707 = bitcast i16* %v_loop_01130 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1707) #1 + %1708 = load i64, i64* @v_456, align 1 + %conv1141 = uitofp i64 %1708 to double + call void @modify_checksum(double %conv1141) + %1709 = load i16, i16* @v_470, align 1 + %conv1142 = sitofp i16 %1709 to double + call void @modify_checksum(double %conv1142) + %1710 = load float, float* @v_471, align 1 + %conv1143 = fpext float %1710 to double + call void @modify_checksum(double %conv1143) + %1711 = bitcast i16* %v_loop_01144 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1711) #1 + store i16 0, i16* %v_loop_01144, align 1 + %v_loop_01144.promoted = load i16, i16* %v_loop_01144, align 1 + br label %for.body1149 + +for.body1149: ; preds = %for.inc1153, %for.end1140 + %inc1154154156 = phi i16 [ %v_loop_01144.promoted, %for.end1140 ], [ %inc1154, %for.inc1153 ] + %idxprom1150 = sext i16 %inc1154154156 to i32 + %arrayidx1151.idx = mul i32 %idxprom1150, 2 + %arrayidx1151 = getelementptr [28 x float], [28 x float]* @v_472, i32 0, i32 %idxprom1150 + %1712 = sub i32 56, %arrayidx1151.idx + %1713 = icmp ult i32 56, %arrayidx1151.idx + %1714 = icmp ult i32 %1712, 2 + %1715 = or i1 %1713, %1714 + br i1 %1715, label %trap.loopexit49, label %for.inc1153 + +for.inc1153: ; preds = 
%for.body1149 + %1716 = load float, float* %arrayidx1151, align 1 + %conv1152 = fpext float %1716 to double + call void @modify_checksum(double %conv1152) + %inc1154 = add i16 %inc1154154156, 1 + %cmp1146 = icmp slt i16 %inc1154, 28 + br i1 %cmp1146, label %for.body1149, label %for.end1155 + +for.end1155: ; preds = %for.inc1153 + %inc1154154.lcssa = phi i16 [ %inc1154, %for.inc1153 ] + store i16 %inc1154154.lcssa, i16* %v_loop_01144, align 1 + store i32 89, i32* %cleanup.dest.slot, align 1 + %1717 = bitcast i16* %v_loop_01144 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1717) #1 + %1718 = load float, float* @v_473, align 1 + %conv1156 = fpext float %1718 to double + call void @modify_checksum(double %conv1156) + %1719 = bitcast i16* %v_loop_01157 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1719) #1 + store i16 0, i16* %v_loop_01157, align 1 + %v_loop_01157.promoted = load i16, i16* %v_loop_01157, align 1 + br label %for.body1162 + +for.body1162: ; preds = %for.inc1166, %for.end1155 + %inc1167157159 = phi i16 [ %v_loop_01157.promoted, %for.end1155 ], [ %inc1167, %for.inc1166 ] + %idxprom1163 = sext i16 %inc1167157159 to i32 + %arrayidx1164.idx = mul i32 %idxprom1163, 2 + %arrayidx1164 = getelementptr [28 x float], [28 x float]* @v_474, i32 0, i32 %idxprom1163 + %1720 = sub i32 56, %arrayidx1164.idx + %1721 = icmp ult i32 56, %arrayidx1164.idx + %1722 = icmp ult i32 %1720, 2 + %1723 = or i1 %1721, %1722 + br i1 %1723, label %trap.loopexit48, label %for.inc1166 + +for.inc1166: ; preds = %for.body1162 + %1724 = load float, float* %arrayidx1164, align 1 + %conv1165 = fpext float %1724 to double + call void @modify_checksum(double %conv1165) + %inc1167 = add i16 %inc1167157159, 1 + %cmp1159 = icmp slt i16 %inc1167, 28 + br i1 %cmp1159, label %for.body1162, label %for.end1168 + +for.end1168: ; preds = %for.inc1166 + %inc1167157.lcssa = phi i16 [ %inc1167, %for.inc1166 ] + store i16 %inc1167157.lcssa, i16* %v_loop_01157, align 1 + store i32 92, i32* 
%cleanup.dest.slot, align 1 + %1725 = bitcast i16* %v_loop_01157 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1725) #1 + %1726 = load float, float* @v_475, align 1 + %conv1169 = fpext float %1726 to double + call void @modify_checksum(double %conv1169) + %1727 = bitcast i16* %v_loop_01170 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1727) #1 + store i16 0, i16* %v_loop_01170, align 1 + %v_loop_01170.promoted = load i16, i16* %v_loop_01170, align 1 + br label %for.body1175 + +for.body1175: ; preds = %for.inc1179, %for.end1168 + %inc1180160162 = phi i16 [ %v_loop_01170.promoted, %for.end1168 ], [ %inc1180, %for.inc1179 ] + %idxprom1176 = sext i16 %inc1180160162 to i32 + %arrayidx1177.idx = mul i32 %idxprom1176, 2 + %arrayidx1177 = getelementptr [28 x float], [28 x float]* @v_476, i32 0, i32 %idxprom1176 + %1728 = sub i32 56, %arrayidx1177.idx + %1729 = icmp ult i32 56, %arrayidx1177.idx + %1730 = icmp ult i32 %1728, 2 + %1731 = or i1 %1729, %1730 + br i1 %1731, label %trap.loopexit47, label %for.inc1179 + +for.inc1179: ; preds = %for.body1175 + %1732 = load float, float* %arrayidx1177, align 1 + %conv1178 = fpext float %1732 to double + call void @modify_checksum(double %conv1178) + %inc1180 = add i16 %inc1180160162, 1 + %cmp1172 = icmp slt i16 %inc1180, 28 + br i1 %cmp1172, label %for.body1175, label %for.end1181 + +for.end1181: ; preds = %for.inc1179 + %inc1180160.lcssa = phi i16 [ %inc1180, %for.inc1179 ] + store i16 %inc1180160.lcssa, i16* %v_loop_01170, align 1 + store i32 95, i32* %cleanup.dest.slot, align 1 + %1733 = bitcast i16* %v_loop_01170 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1733) #1 + %1734 = load i64, i64* @v_477, align 1 + %conv1182 = uitofp i64 %1734 to double + call void @modify_checksum(double %conv1182) + %1735 = load i64, i64* @v_483, align 1 + %conv1183 = sitofp i64 %1735 to double + call void @modify_checksum(double %conv1183) + %1736 = bitcast i16* %v_loop_01184 to i8* + call void 
@llvm.lifetime.start.p0i8(i64 1, i8* %1736) #1 + store i16 0, i16* %v_loop_01184, align 1 + %v_loop_01184.promoted = load i16, i16* %v_loop_01184, align 1 + br label %for.body1189 + +for.body1189: ; preds = %for.inc1193, %for.end1181 + %inc1194163165 = phi i16 [ %v_loop_01184.promoted, %for.end1181 ], [ %inc1194, %for.inc1193 ] + %idxprom1190 = sext i16 %inc1194163165 to i32 + %arrayidx1191.idx = mul i16 %inc1194163165, 4 + %arrayidx1191 = getelementptr [7 x i64], [7 x i64]* @v_484, i32 0, i32 %idxprom1190 + %1737 = sub i16 28, %arrayidx1191.idx + %1738 = icmp ult i16 28, %arrayidx1191.idx + %1739 = icmp ult i16 %1737, 4 + %1740 = or i1 %1738, %1739 + br i1 %1740, label %trap.loopexit46, label %for.inc1193 + +for.inc1193: ; preds = %for.body1189 + %1741 = load i64, i64* %arrayidx1191, align 1 + %conv1192 = sitofp i64 %1741 to double + call void @modify_checksum(double %conv1192) + %inc1194 = add i16 %inc1194163165, 1 + %cmp1186 = icmp slt i16 %inc1194, 7 + br i1 %cmp1186, label %for.body1189, label %for.end1195 + +for.end1195: ; preds = %for.inc1193 + %inc1194163.lcssa = phi i16 [ %inc1194, %for.inc1193 ] + store i16 %inc1194163.lcssa, i16* %v_loop_01184, align 1 + store i32 98, i32* %cleanup.dest.slot, align 1 + %1742 = bitcast i16* %v_loop_01184 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1742) #1 + %1743 = load i32, i32* @v_485, align 1 + %1744 = sitofp i32 %1743 to double + %1745 = fmul double %1744, 0x3E00000000000000 + call void @modify_checksum(double %1745) + %1746 = load i40, i40* @v_494, align 1 + %1747 = sitofp i40 %1746 to double + %1748 = fmul double %1747, 0x3E00000000000000 + call void @modify_checksum(double %1748) + %1749 = load volatile i16, i16* @v_497, align 1 + %conv1196 = sitofp i16 %1749 to double + call void @modify_checksum(double %conv1196) + %1750 = load i32, i32* @v_504, align 1 + %conv1197 = uitofp i32 %1750 to double + call void @modify_checksum(double %conv1197) + %1751 = load i16, i16* @v_507, align 1 + %1752 = sitofp i16 
%1751 to double + %1753 = fmul double %1752, 0x3F00000000000000 + call void @modify_checksum(double %1753) + %1754 = bitcast i16* %v_loop_01198 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1754) #1 + store i16 0, i16* %v_loop_01198, align 1 + %v_loop_01198.promoted = load i16, i16* %v_loop_01198, align 1 + br label %for.body1203 + +for.body1203: ; preds = %for.inc1206, %for.end1195 + %inc1207166168 = phi i16 [ %v_loop_01198.promoted, %for.end1195 ], [ %inc1207, %for.inc1206 ] + %idxprom1204 = sext i16 %inc1207166168 to i32 + %arrayidx1205 = getelementptr [1 x i16], [1 x i16]* @v_508, i32 0, i32 %idxprom1204 + %1755 = sub i32 1, %idxprom1204 + %1756 = icmp ult i32 1, %idxprom1204 + %1757 = icmp ult i32 %1755, 1 + %1758 = or i1 %1756, %1757 + br i1 %1758, label %trap.loopexit45, label %for.inc1206 + +for.inc1206: ; preds = %for.body1203 + %1759 = load i16, i16* %arrayidx1205, align 1 + %1760 = sitofp i16 %1759 to double + %1761 = fmul double %1760, 0x3F00000000000000 + call void @modify_checksum(double %1761) + %inc1207 = add i16 %inc1207166168, 1 + %cmp1200 = icmp slt i16 %inc1207, 1 + br i1 %cmp1200, label %for.body1203, label %for.end1208 + +for.end1208: ; preds = %for.inc1206 + %inc1207166.lcssa = phi i16 [ %inc1207, %for.inc1206 ] + store i16 %inc1207166.lcssa, i16* %v_loop_01198, align 1 + store i32 101, i32* %cleanup.dest.slot, align 1 + %1762 = bitcast i16* %v_loop_01198 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1762) #1 + %1763 = load i16, i16* @v_509, align 1 + %1764 = sitofp i16 %1763 to double + %1765 = fmul double %1764, 0x3F00000000000000 + call void @modify_checksum(double %1765) + %1766 = bitcast i16* %v_loop_01209 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1766) #1 + store i16 0, i16* %v_loop_01209, align 1 + %v_loop_01209.promoted = load i16, i16* %v_loop_01209, align 1 + br label %for.body1214 + +for.body1214: ; preds = %for.inc1217, %for.end1208 + %inc1218169171 = phi i16 [ %v_loop_01209.promoted, 
%for.end1208 ], [ %inc1218, %for.inc1217 ] + %idxprom1215 = sext i16 %inc1218169171 to i32 + %arrayidx1216 = getelementptr [1 x i16], [1 x i16]* @v_510, i32 0, i32 %idxprom1215 + %1767 = sub i32 1, %idxprom1215 + %1768 = icmp ult i32 1, %idxprom1215 + %1769 = icmp ult i32 %1767, 1 + %1770 = or i1 %1768, %1769 + br i1 %1770, label %trap.loopexit44, label %for.inc1217 + +for.inc1217: ; preds = %for.body1214 + %1771 = load i16, i16* %arrayidx1216, align 1 + %1772 = sitofp i16 %1771 to double + %1773 = fmul double %1772, 0x3F00000000000000 + call void @modify_checksum(double %1773) + %inc1218 = add i16 %inc1218169171, 1 + %cmp1211 = icmp slt i16 %inc1218, 1 + br i1 %cmp1211, label %for.body1214, label %for.end1219 + +for.end1219: ; preds = %for.inc1217 + %inc1218169.lcssa = phi i16 [ %inc1218, %for.inc1217 ] + store i16 %inc1218169.lcssa, i16* %v_loop_01209, align 1 + store i32 104, i32* %cleanup.dest.slot, align 1 + %1774 = bitcast i16* %v_loop_01209 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1774) #1 + %1775 = load i16, i16* @v_511, align 1 + %1776 = sitofp i16 %1775 to double + %1777 = fmul double %1776, 0x3F00000000000000 + call void @modify_checksum(double %1777) + %1778 = bitcast i16* %v_loop_01220 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1778) #1 + store i16 0, i16* %v_loop_01220, align 1 + %v_loop_01220.promoted = load i16, i16* %v_loop_01220, align 1 + br label %for.body1225 + +for.body1225: ; preds = %for.inc1228, %for.end1219 + %inc1229172174 = phi i16 [ %v_loop_01220.promoted, %for.end1219 ], [ %inc1229, %for.inc1228 ] + %idxprom1226 = sext i16 %inc1229172174 to i32 + %arrayidx1227 = getelementptr [1 x i16], [1 x i16]* @v_512, i32 0, i32 %idxprom1226 + %1779 = sub i32 1, %idxprom1226 + %1780 = icmp ult i32 1, %idxprom1226 + %1781 = icmp ult i32 %1779, 1 + %1782 = or i1 %1780, %1781 + br i1 %1782, label %trap.loopexit43, label %for.inc1228 + +for.inc1228: ; preds = %for.body1225 + %1783 = load i16, i16* %arrayidx1227, align 1 + 
%1784 = sitofp i16 %1783 to double + %1785 = fmul double %1784, 0x3F00000000000000 + call void @modify_checksum(double %1785) + %inc1229 = add i16 %inc1229172174, 1 + %cmp1222 = icmp slt i16 %inc1229, 1 + br i1 %cmp1222, label %for.body1225, label %for.end1230 + +for.end1230: ; preds = %for.inc1228 + %inc1229172.lcssa = phi i16 [ %inc1229, %for.inc1228 ] + store i16 %inc1229172.lcssa, i16* %v_loop_01220, align 1 + store i32 107, i32* %cleanup.dest.slot, align 1 + %1786 = bitcast i16* %v_loop_01220 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1786) #1 + %1787 = load i16, i16* @v_513, align 1 + %1788 = sitofp i16 %1787 to double + %1789 = fmul double %1788, 0x3F00000000000000 + call void @modify_checksum(double %1789) + %1790 = bitcast i16* %v_loop_01231 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1790) #1 + store i16 0, i16* %v_loop_01231, align 1 + %v_loop_01231.promoted = load i16, i16* %v_loop_01231, align 1 + br label %for.body1236 + +for.body1236: ; preds = %for.inc1239, %for.end1230 + %inc1240175177 = phi i16 [ %v_loop_01231.promoted, %for.end1230 ], [ %inc1240, %for.inc1239 ] + %idxprom1237 = sext i16 %inc1240175177 to i32 + %arrayidx1238 = getelementptr [1 x i16], [1 x i16]* @v_514, i32 0, i32 %idxprom1237 + %1791 = sub i32 1, %idxprom1237 + %1792 = icmp ult i32 1, %idxprom1237 + %1793 = icmp ult i32 %1791, 1 + %1794 = or i1 %1792, %1793 + br i1 %1794, label %trap.loopexit42, label %for.inc1239 + +for.inc1239: ; preds = %for.body1236 + %1795 = load i16, i16* %arrayidx1238, align 1 + %1796 = sitofp i16 %1795 to double + %1797 = fmul double %1796, 0x3F00000000000000 + call void @modify_checksum(double %1797) + %inc1240 = add i16 %inc1240175177, 1 + %cmp1233 = icmp slt i16 %inc1240, 1 + br i1 %cmp1233, label %for.body1236, label %for.end1241 + +for.end1241: ; preds = %for.inc1239 + %inc1240175.lcssa = phi i16 [ %inc1240, %for.inc1239 ] + store i16 %inc1240175.lcssa, i16* %v_loop_01231, align 1 + store i32 110, i32* %cleanup.dest.slot, 
align 1 + %1798 = bitcast i16* %v_loop_01231 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1798) #1 + %1799 = load i64, i64* @v_516, align 1 + %conv1242 = sitofp i64 %1799 to double + call void @modify_checksum(double %conv1242) + %1800 = bitcast i16* %v_loop_01243 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1800) #1 + store i16 0, i16* %v_loop_01243, align 1 + %v_loop_01243.promoted = load i16, i16* %v_loop_01243, align 1 + br label %for.body1248 + +for.body1248: ; preds = %for.inc1252, %for.end1241 + %inc1253178180 = phi i16 [ %v_loop_01243.promoted, %for.end1241 ], [ %inc1253, %for.inc1252 ] + %idxprom1249 = sext i16 %inc1253178180 to i32 + %arrayidx1250.idx = mul i16 %inc1253178180, 4 + %arrayidx1250 = getelementptr [6 x i64], [6 x i64]* @v_517, i32 0, i32 %idxprom1249 + %1801 = sub i16 24, %arrayidx1250.idx + %1802 = icmp ult i16 24, %arrayidx1250.idx + %1803 = icmp ult i16 %1801, 4 + %1804 = or i1 %1802, %1803 + br i1 %1804, label %trap.loopexit41, label %for.inc1252 + +for.inc1252: ; preds = %for.body1248 + %1805 = load i64, i64* %arrayidx1250, align 1 + %conv1251 = sitofp i64 %1805 to double + call void @modify_checksum(double %conv1251) + %inc1253 = add i16 %inc1253178180, 1 + %cmp1245 = icmp slt i16 %inc1253, 6 + br i1 %cmp1245, label %for.body1248, label %for.end1254 + +for.end1254: ; preds = %for.inc1252 + %inc1253178.lcssa = phi i16 [ %inc1253, %for.inc1252 ] + store i16 %inc1253178.lcssa, i16* %v_loop_01243, align 1 + store i32 113, i32* %cleanup.dest.slot, align 1 + %1806 = bitcast i16* %v_loop_01243 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1806) #1 + %1807 = load double, double* @v_519, align 1 + call void @modify_checksum(double %1807) + %1808 = load float, float* @v_522, align 1 + %conv1255 = fpext float %1808 to double + call void @modify_checksum(double %conv1255) + %1809 = bitcast i16* %v_loop_01256 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1809) #1 + store i16 0, i16* %v_loop_01256, align 1 + 
%v_loop_01256.promoted = load i16, i16* %v_loop_01256, align 1 + br label %for.body1261 + +for.body1261: ; preds = %for.inc1265, %for.end1254 + %inc1266181183 = phi i16 [ %v_loop_01256.promoted, %for.end1254 ], [ %inc1266, %for.inc1265 ] + %idxprom1262 = sext i16 %inc1266181183 to i32 + %arrayidx1263.idx = mul i32 %idxprom1262, 2 + %arrayidx1263 = getelementptr [2 x float], [2 x float]* @v_523, i32 0, i32 %idxprom1262 + %1810 = sub i32 4, %arrayidx1263.idx + %1811 = icmp ult i32 4, %arrayidx1263.idx + %1812 = icmp ult i32 %1810, 2 + %1813 = or i1 %1811, %1812 + br i1 %1813, label %trap.loopexit, label %for.inc1265 + +for.inc1265: ; preds = %for.body1261 + %1814 = load float, float* %arrayidx1263, align 1 + %conv1264 = fpext float %1814 to double + call void @modify_checksum(double %conv1264) + %inc1266 = add i16 %inc1266181183, 1 + %cmp1258 = icmp slt i16 %inc1266, 2 + br i1 %cmp1258, label %for.body1261, label %for.end1267 + +for.end1267: ; preds = %for.inc1265 + %inc1266181.lcssa = phi i16 [ %inc1266, %for.inc1265 ] + store i16 %inc1266181.lcssa, i16* %v_loop_01256, align 1 + store i32 116, i32* %cleanup.dest.slot, align 1 + %1815 = bitcast i16* %v_loop_01256 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1815) #1 + %1816 = load volatile double, double* @v_524, align 1 + call void @modify_checksum(double %1816) + %1817 = load i16, i16* @v_530, align 1 + %conv1268 = uitofp i16 %1817 to double + call void @modify_checksum(double %conv1268) + %1818 = load i16, i16* @v_531, align 1 + %conv1269 = uitofp i16 %1818 to double + call void @modify_checksum(double %conv1269) + %1819 = load volatile i24, i24* @v_533, align 1 + %1820 = uitofp i24 %1819 to double + %1821 = fmul double %1820, 0x3F00000000000000 + call void @modify_checksum(double %1821) + %1822 = load double, double* @v_539, align 1 + call void @modify_checksum(double %1822) + %1823 = load i16, i16* @v_542, align 1 + %conv1270 = uitofp i16 %1823 to double + call void @modify_checksum(double %conv1270) + 
%1824 = load i16, i16* @v_544, align 1 + %conv1271 = uitofp i16 %1824 to double + call void @modify_checksum(double %conv1271) + %1825 = load i64, i64* @v_545, align 1 + %conv1272 = uitofp i64 %1825 to double + call void @modify_checksum(double %conv1272) + %1826 = load i32, i32* @v_548, align 1 + %1827 = uitofp i32 %1826 to double + %1828 = fmul double %1827, 0x3E00000000000000 + call void @modify_checksum(double %1828) + %1829 = load i40, i40* @v_550, align 1 + %1830 = sitofp i40 %1829 to double + %1831 = fmul double %1830, 0x3E00000000000000 + call void @modify_checksum(double %1831) + %1832 = load float, float* @v_553, align 1 + %conv1273 = fpext float %1832 to double + call void @modify_checksum(double %conv1273) + %1833 = load volatile i24, i24* @v_584, align 1 + %1834 = sitofp i24 %1833 to double + %1835 = fmul double %1834, 0x3F00000000000000 + call void @modify_checksum(double %1835) + %1836 = bitcast i16* %v_loop_01274 to i8* + call void @llvm.lifetime.start.p0i8(i64 1, i8* %1836) #1 + store i16 0, i16* %v_loop_01274, align 1 + %v_loop_01274.promoted = load i16, i16* %v_loop_01274, align 1 + br label %for.inc1282 + +for.inc1282: ; preds = %for.inc1282, %for.end1267 + %inc1283184185 = phi i16 [ %v_loop_01274.promoted, %for.end1267 ], [ %inc1283, %for.inc1282 ] + %idxprom1280 = sext i16 %inc1283184185 to i32 + %arrayidx1281 = getelementptr [6 x i24], [6 x i24]* @v_585, i32 0, i32 %idxprom1280 + %1837 = load volatile i24, i24* %arrayidx1281, align 1 + %1838 = sitofp i24 %1837 to double + %1839 = fmul double %1838, 0x3F00000000000000 + call void @modify_checksum(double %1839) + %inc1283 = add i16 %inc1283184185, 1 + %cmp1276 = icmp slt i16 %inc1283, 6 + br i1 %cmp1276, label %for.inc1282, label %for.end1284 + +for.end1284: ; preds = %for.inc1282 + %inc1283184.lcssa = phi i16 [ %inc1283, %for.inc1282 ] + store i16 %inc1283184.lcssa, i16* %v_loop_01274, align 1 + store i32 119, i32* %cleanup.dest.slot, align 1 + %1840 = bitcast i16* %v_loop_01274 to i8* + call 
void @llvm.lifetime.end.p0i8(i64 1, i8* %1840) #1 + %1841 = load i32, i32* @v_600, align 1 + %1842 = sitofp i32 %1841 to double + %1843 = fmul double %1842, 0x3E00000000000000 + call void @modify_checksum(double %1843) + %1844 = load i16, i16* @v_612, align 1 + %1845 = sitofp i16 %1844 to double + %1846 = fmul double %1845, 0x3F00000000000000 + call void @modify_checksum(double %1846) + %1847 = load double, double* @v_624, align 1 + call void @modify_checksum(double %1847) + %1848 = load i24, i24* @v_629, align 1 + %1849 = uitofp i24 %1848 to double + %1850 = fmul double %1849, 0x3F00000000000000 + call void @modify_checksum(double %1850) + %1851 = load i64, i64* @v_632, align 1 + %conv1285 = uitofp i64 %1851 to double + call void @modify_checksum(double %conv1285) + %1852 = load volatile double, double* @v_638, align 1 + call void @modify_checksum(double %1852) + call void @print_checksum() + %1853 = bitcast %struct.s_2* %v_640 to i8* + call void @llvm.lifetime.end.p0i8(i64 14, i8* %1853) #1 + %1854 = bitcast { i16, i16 }* %v_637 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %1854) #1 + %1855 = bitcast i24* %v_628 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %1855) #1 + %1856 = bitcast double* %v_627 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1856) #1 + %1857 = bitcast { i40, i40 }* %v_623 to i8* + call void @llvm.lifetime.end.p0i8(i64 6, i8* %1857) #1 + %1858 = bitcast { i16, i16 }* %v_587 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %1858) #1 + %1859 = bitcast { i16, i16 }* %v_560 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %1859) #1 + %1860 = bitcast { i40, i40 }* %v_559 to i8* + call void @llvm.lifetime.end.p0i8(i64 6, i8* %1860) #1 + %1861 = bitcast float* %v_557 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %1861) #1 + %1862 = bitcast double* %v_549 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1862) #1 + %1863 = bitcast i40* %v_496 to i8* + call void @llvm.lifetime.end.p0i8(i64 3, i8* %1863) 
#1 + %1864 = bitcast { i64, i64 }* %v_493 to i8* + call void @llvm.lifetime.end.p0i8(i64 8, i8* %1864) #1 + %1865 = bitcast i16* %v_479 to i8* + call void @llvm.lifetime.end.p0i8(i64 1, i8* %1865) #1 + %1866 = bitcast i64* %v_460 to i8* + call void @llvm.lifetime.end.p0i8(i64 4, i8* %1866) #1 + %1867 = bitcast i24* %v_13 to i8* + call void @llvm.lifetime.end.p0i8(i64 2, i8* %1867) #1 + %1868 = load i16, i16* %retval, align 1 + ret i16 %1868 + +trap.loopexit: ; preds = %for.body1261 + %inc1266181.lcssa182 = phi i16 [ %inc1266181183, %for.body1261 ] + store i16 %inc1266181.lcssa182, i16* %v_loop_01256, align 1 + br label %trap + +trap.loopexit41: ; preds = %for.body1248 + %inc1253178.lcssa179 = phi i16 [ %inc1253178180, %for.body1248 ] + store i16 %inc1253178.lcssa179, i16* %v_loop_01243, align 1 + br label %trap + +trap.loopexit42: ; preds = %for.body1236 + %inc1240175.lcssa176 = phi i16 [ %inc1240175177, %for.body1236 ] + store i16 %inc1240175.lcssa176, i16* %v_loop_01231, align 1 + br label %trap + +trap.loopexit43: ; preds = %for.body1225 + %inc1229172.lcssa173 = phi i16 [ %inc1229172174, %for.body1225 ] + store i16 %inc1229172.lcssa173, i16* %v_loop_01220, align 1 + br label %trap + +trap.loopexit44: ; preds = %for.body1214 + %inc1218169.lcssa170 = phi i16 [ %inc1218169171, %for.body1214 ] + store i16 %inc1218169.lcssa170, i16* %v_loop_01209, align 1 + br label %trap + +trap.loopexit45: ; preds = %for.body1203 + %inc1207166.lcssa167 = phi i16 [ %inc1207166168, %for.body1203 ] + store i16 %inc1207166.lcssa167, i16* %v_loop_01198, align 1 + br label %trap + +trap.loopexit46: ; preds = %for.body1189 + %inc1194163.lcssa164 = phi i16 [ %inc1194163165, %for.body1189 ] + store i16 %inc1194163.lcssa164, i16* %v_loop_01184, align 1 + br label %trap + +trap.loopexit47: ; preds = %for.body1175 + %inc1180160.lcssa161 = phi i16 [ %inc1180160162, %for.body1175 ] + store i16 %inc1180160.lcssa161, i16* %v_loop_01170, align 1 + br label %trap + +trap.loopexit48: ; preds = 
%for.body1162 + %inc1167157.lcssa158 = phi i16 [ %inc1167157159, %for.body1162 ] + store i16 %inc1167157.lcssa158, i16* %v_loop_01157, align 1 + br label %trap + +trap.loopexit49: ; preds = %for.body1149 + %inc1154154.lcssa155 = phi i16 [ %inc1154154156, %for.body1149 ] + store i16 %inc1154154.lcssa155, i16* %v_loop_01144, align 1 + br label %trap + +trap.loopexit50: ; preds = %for.body1135 + %inc1139151.lcssa152 = phi i16 [ %inc1139151153, %for.body1135 ] + store i16 %inc1139151.lcssa152, i16* %v_loop_01130, align 1 + br label %trap + +trap.loopexit51: ; preds = %for.body1124 + %inc1128148.lcssa149 = phi i16 [ %inc1128148150, %for.body1124 ] + store i16 %inc1128148.lcssa149, i16* %v_loop_01119, align 1 + br label %trap + +trap.loopexit52: ; preds = %for.body1113 + %inc1117145.lcssa146 = phi i16 [ %inc1117145147, %for.body1113 ] + store i16 %inc1117145.lcssa146, i16* %v_loop_01108, align 1 + br label %trap + +trap.loopexit53: ; preds = %for.body1102 + %inc1106142.lcssa143 = phi i16 [ %inc1106142144, %for.body1102 ] + store i16 %inc1106142.lcssa143, i16* %v_loop_01097, align 1 + br label %trap + +trap.loopexit54: ; preds = %for.body1085 + %inc1095139.lcssa140 = phi i16 [ %inc1095139141, %for.body1085 ] + store i16 %inc1095139.lcssa140, i16* %v_loop_01074, align 1 + br label %trap + +trap.loopexit55: ; preds = %for.body1067 + %inc1072136.lcssa137 = phi i16 [ %inc1072136138, %for.body1067 ] + store i16 %inc1072136.lcssa137, i16* %v_loop_01062, align 1 + br label %trap + +trap.loopexit56: ; preds = %for.body1054 + %inc1058133.lcssa134 = phi i16 [ %inc1058133135, %for.body1054 ] + store i16 %inc1058133.lcssa134, i16* %v_loop_01049, align 1 + br label %trap + +trap.loopexit57: ; preds = %for.body1040 + %inc1044130.lcssa131 = phi i16 [ %inc1044130132, %for.body1040 ] + store i16 %inc1044130.lcssa131, i16* %v_loop_01035, align 1 + br label %trap + +trap.loopexit58: ; preds = %for.body1024 + %inc1028127.lcssa128 = phi i16 [ %inc1028127129, %for.body1024 ] + store i16 
%inc1028127.lcssa128, i16* %v_loop_01019, align 1 + br label %trap + +trap.loopexit59: ; preds = %for.body1012 + %inc1017124.lcssa125 = phi i16 [ %inc1017124126, %for.body1012 ] + store i16 %inc1017124.lcssa125, i16* %v_loop_01007, align 1 + br label %trap + +trap.loopexit60: ; preds = %for.body999 + %inc1004121.lcssa122 = phi i16 [ %inc1004121123, %for.body999 ] + store i16 %inc1004121.lcssa122, i16* %v_loop_0994, align 1 + br label %trap + +trap.loopexit61: ; preds = %for.body986 + %inc991118.lcssa119 = phi i16 [ %inc991118120, %for.body986 ] + store i16 %inc991118.lcssa119, i16* %v_loop_0981, align 1 + br label %trap + +trap.loopexit62: ; preds = %for.body973 + %inc978115.lcssa116 = phi i16 [ %inc978115117, %for.body973 ] + store i16 %inc978115.lcssa116, i16* %v_loop_0968, align 1 + br label %trap + +trap.loopexit63: ; preds = %for.body945 + %inc950112.lcssa113 = phi i16 [ %inc950112114, %for.body945 ] + store i16 %inc950112.lcssa113, i16* %v_loop_0940, align 1 + br label %trap + +trap.loopexit64: ; preds = %for.body918 + %inc922109.lcssa110 = phi i16 [ %inc922109111, %for.body918 ] + store i16 %inc922109.lcssa110, i16* %v_loop_0913, align 1 + br label %trap + +trap.loopexit65: ; preds = %for.body902 + %inc906106.lcssa107 = phi i16 [ %inc906106108, %for.body902 ] + store i16 %inc906106.lcssa107, i16* %v_loop_0897, align 1 + br label %trap + +trap.loopexit66: ; preds = %for.body890 + %inc895103.lcssa104 = phi i16 [ %inc895103105, %for.body890 ] + store i16 %inc895103.lcssa104, i16* %v_loop_0885, align 1 + br label %trap + +trap.loopexit67: ; preds = %for.body877 + %inc882100.lcssa101 = phi i16 [ %inc882100102, %for.body877 ] + store i16 %inc882100.lcssa101, i16* %v_loop_0872, align 1 + br label %trap + +trap.loopexit68: ; preds = %for.body864 + %inc86997.lcssa98 = phi i16 [ %inc8699799, %for.body864 ] + store i16 %inc86997.lcssa98, i16* %v_loop_0, align 1 + br label %trap + +trap.loopexit69: ; preds = %1306, %cond.end789 + %inc80195 = phi i16 [ %v_588.promoted, 
%cond.end789 ], [ %v_588.promoted, %1306 ] + %1869 = phi i64 [ %cond790, %cond.end789 ], [ %cond790, %1306 ] + store i64 %1869, i64* %tmp784, align 1 + store i16 %inc80195, i16* %v_588, align 1 + br label %trap + +trap.loopexit70: ; preds = %1263, %1256, %1250, %1244, %1238, %1232, %1227, %complex_mul_cont738, %1214, %1208, %1202, %1196, %1191, %complex_mul_cont718, %1178, %1172, %1166, %1160, %1154, %1148, %1142, %1136, %1130, %1124, %1119, %complex_mul_cont672, %1106, %1100, %1094, %1088, %1082, %1076, %1071, %complex_mul_cont650, %1058, %1052, %1046, %1040, %1035, %1028, %1022, %1016, %1010, %1004, %999, %complex_mul_cont622, %986, %980, %974, %968, %963, %956, %950, %complex_mul_cont600, %938, %932, %926, %920, %914, %908, %902, %896, %890, %884, %879, %872, %866, %860, %854, %848, %843, %836, %830, %824, %818, %for.body546 + %inc76291 = phi i16 [ %inc7629092, %for.body546 ], [ %inc7629092, %818 ], [ %inc7629092, %824 ], [ %inc7629092, %830 ], [ %inc7629092, %836 ], [ %inc7629092, %843 ], [ %inc7629092, %848 ], [ %inc7629092, %854 ], [ %inc7629092, %860 ], [ %inc7629092, %866 ], [ %inc7629092, %872 ], [ %inc7629092, %879 ], [ %inc7629092, %884 ], [ %inc7629092, %890 ], [ %inc7629092, %896 ], [ %inc7629092, %902 ], [ %inc7629092, %908 ], [ %inc7629092, %914 ], [ %inc7629092, %920 ], [ %inc7629092, %926 ], [ %inc7629092, %932 ], [ %inc7629092, %938 ], [ %inc7629092, %complex_mul_cont600 ], [ %inc7629092, %950 ], [ %inc7629092, %956 ], [ %inc7629092, %963 ], [ %inc7629092, %968 ], [ %inc7629092, %974 ], [ %inc7629092, %980 ], [ %inc7629092, %986 ], [ %inc7629092, %complex_mul_cont622 ], [ %inc7629092, %999 ], [ %inc7629092, %1004 ], [ %inc7629092, %1010 ], [ %inc7629092, %1016 ], [ %inc7629092, %1022 ], [ %inc7629092, %1028 ], [ %inc7629092, %1035 ], [ %inc7629092, %1040 ], [ %inc7629092, %1046 ], [ %inc7629092, %1052 ], [ %inc7629092, %1058 ], [ %inc7629092, %complex_mul_cont650 ], [ %inc7629092, %1071 ], [ %inc7629092, %1076 ], [ %inc7629092, %1082 ], [ 
%inc7629092, %1088 ], [ %inc7629092, %1094 ], [ %inc7629092, %1100 ], [ %inc7629092, %1106 ], [ %inc7629092, %complex_mul_cont672 ], [ %inc7629092, %1119 ], [ %inc7629092, %1124 ], [ %inc7629092, %1130 ], [ %inc7629092, %1136 ], [ %inc7629092, %1142 ], [ %inc7629092, %1148 ], [ %inc7629092, %1154 ], [ %inc7629092, %1160 ], [ %inc7629092, %1166 ], [ %inc7629092, %1172 ], [ %inc7629092, %1178 ], [ %inc7629092, %complex_mul_cont718 ], [ %inc7629092, %1191 ], [ %inc7629092, %1196 ], [ %inc7629092, %1202 ], [ %inc7629092, %1208 ], [ %inc7629092, %1214 ], [ %inc7629092, %complex_mul_cont738 ], [ %inc7629092, %1227 ], [ %inc7629092, %1232 ], [ %inc7629092, %1238 ], [ %inc7629092, %1244 ], [ %inc7629092, %1250 ], [ %inc7629092, %1256 ], [ %inc7629092, %1263 ] + store i16 %inc76291, i16* %v_562, align 1 + br label %trap + +trap.loopexit71: ; preds = %675, %668, %662, %652, %644, %638, %630, %624, %615, %607, %600, %594, %586, %580, %572, %566, %558, %549, %542, %534, %528, %519, %513, %503, %497, %491, %for.body365 + br label %trap + +trap.loopexit72: ; preds = %cond.end293 + %inc30886.lcssa87 = phi i32 [ %inc3088688, %cond.end293 ] + %.lcssa85 = phi i64 [ %430, %cond.end293 ] + %cond294.lcssa = phi i16 [ %cond294, %cond.end293 ] + %inc283.lcssa = phi i64 [ %inc283, %cond.end293 ] + store i64 %inc283.lcssa, i64* @v_456, align 1 + store i16 %cond294.lcssa, i16* %tmp287, align 1 + store i64 %.lcssa85, i64* %tmp298, align 1 + store i32 %inc30886.lcssa87, i32* %v_480, align 1 + br label %trap + +trap.loopexit73: ; preds = %408, %406, %404, %402, %399, %396, %394, %391, %388, %386, %383, %381, %378, %375, %for.body250 + %inc27580 = phi i32 [ %inc2757981, %for.body250 ], [ %inc2757981, %375 ], [ %inc2757981, %378 ], [ %inc2757981, %381 ], [ %inc2757981, %383 ], [ %inc2757981, %386 ], [ %inc2757981, %388 ], [ %inc2757981, %391 ], [ %inc2757981, %394 ], [ %inc2757981, %396 ], [ %inc2757981, %399 ], [ %inc2757981, %402 ], [ %inc2757981, %404 ], [ %inc2757981, %406 ], [ %inc2757981, 
%408 ] + store i32 %inc27580, i32* %v_loop_1, align 1 + br label %trap + +trap.loopexit74: ; preds = %282, %275, %269, %263, %256, %249, %243, %236, %229, %223, %216, %210, %203, %196, %189, %183, %176, %169, %162, %156, %149, %142, %135, %129, %122, %116, %109, %102, %95, %for.body65 + br label %trap + +trap: ; preds = %trap.loopexit74, %trap.loopexit73, %trap.loopexit72, %trap.loopexit71, %trap.loopexit70, %trap.loopexit69, %trap.loopexit68, %trap.loopexit67, %trap.loopexit66, %trap.loopexit65, %trap.loopexit64, %trap.loopexit63, %trap.loopexit62, %trap.loopexit61, %trap.loopexit60, %trap.loopexit59, %trap.loopexit58, %trap.loopexit57, %trap.loopexit56, %trap.loopexit55, %trap.loopexit54, %trap.loopexit53, %trap.loopexit52, %trap.loopexit51, %trap.loopexit50, %trap.loopexit49, %trap.loopexit48, %trap.loopexit47, %trap.loopexit46, %trap.loopexit45, %trap.loopexit44, %trap.loopexit43, %trap.loopexit42, %trap.loopexit41, %trap.loopexit, %if.end509, %742, %cond.end468, %66, %if.else, %entry + call void @llvm.trap() #5 + unreachable +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare i16 @llvm.smul.fix.i16(i16, i16, i32 immarg) #3 + +; Function Attrs: nounwind readnone +declare i16 @llvm.cowabunga.sub.i16(i16, i16) #2 + +declare dso_local void @__muldc3({ double, double }*, double, double, double, double) + +; Function Attrs: cold noreturn nounwind +declare void @llvm.trap() #4 + +attributes #0 = { argmemonly nofree nosync nounwind willreturn } +attributes #1 = { nounwind } +attributes #2 = { nounwind readnone } +attributes #3 = { nofree nosync nounwind readnone speculatable willreturn } +attributes #4 = { cold noreturn nounwind } +attributes #5 = { noreturn nounwind } + diff --git a/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll b/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll --- a/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll +++ b/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll @@ -1,5 +1,5 @@ ; 
NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -indvars -scalar-evolution-use-expensive-range-sharpening -S | FileCheck %s +; RUN: opt < %s -indvars -scalar-evolution-use-expensive-range-sharpening -scalar-evolution-prove-implications-via-truncation -S | FileCheck %s target triple = "aarch64--linux-gnu" ; Provide legal integer types.