Index: llvm/test/Transforms/SLPVectorizer/AArch64/slp-reassociate-issue.ll
===================================================================
Review note: this patch adds an SLP-vectorizer regression test whose
assertions were autogenerated by utils/update_test_checks.py (see the
NOTE line in the test).  The function body is one manually 8x-unrolled
iteration of a sum-of-absolute-differences style loop: for each lane it
computes (c1[i] + c1a[i] + c1a[i+1]) - c2[i], takes the absolute value
via icmp/negate/select, and accumulates into %selv.*.  The CHECK lines
pin the expected opt output, which contains no vector instructions at
all -- i.e. the SLP vectorizer is expected to leave this reassociated
reduction scalar.  NOTE(review): presumably this guards against a known
reassociation miscompile or cost-model regression -- confirm against
the originating issue referenced by the filename.  Everything from the
"---" marker down is the patch payload and is intentionally untouched:
the autogenerated CHECK lines must stay in exact lockstep with the IR,
and the hunk header fixes the added-line count at 320.
--- /dev/null
+++ llvm/test/Transforms/SLPVectorizer/AArch64/slp-reassociate-issue.ll
@@ -0,0 +1,320 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -slp-vectorizer -S -mtriple=aarch64-unknown-unknown < %s | FileCheck %s
+
+define i32 @complexloop(i8 %block_A, i8 %block_B, i32 %stride_x, i32 %loop_max) {
+; CHECK-LABEL: @complexloop(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[STRIDE_X:%.*]] to i64
+; CHECK-NEXT:    [[CMP57:%.*]] = icmp sgt i32 [[LOOP_MAX:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP57]], label [[FOR_COND3_PREHEADER_PREHEADER:%.*]], label [[FOR_END29:%.*]]
+; CHECK:       for.cond3.preheader.preheader:
+; CHECK-NEXT:    [[CONV1:%.*]] = zext i8 [[BLOCK_B:%.*]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = inttoptr i64 [[CONV1]] to i8*
+; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[BLOCK_A:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = inttoptr i64 [[CONV]] to i8*
+; CHECK-NEXT:    br label [[FOR_COND3_PREHEADER:%.*]]
+; CHECK:       for.cond3.preheader:
+; CHECK-NEXT:    [[C1_061:%.*]] = phi i8* [ [[C1A_062:%.*]], [[FOR_COND3_PREHEADER]] ], [ [[TMP1]], [[FOR_COND3_PREHEADER_PREHEADER]] ]
+; CHECK-NEXT:    [[SUM_060:%.*]] = phi i32 [ [[SELV_7:%.*]], [[FOR_COND3_PREHEADER]] ], [ 0, [[FOR_COND3_PREHEADER_PREHEADER]] ]
+; CHECK-NEXT:    [[J_059:%.*]] = phi i32 [ [[INC28:%.*]], [[FOR_COND3_PREHEADER]] ], [ 0, [[FOR_COND3_PREHEADER_PREHEADER]] ]
+; CHECK-NEXT:    [[C2_058:%.*]] = phi i8* [ [[ADD_PTR26:%.*]], [[FOR_COND3_PREHEADER]] ], [ [[TMP0]], [[FOR_COND3_PREHEADER_PREHEADER]] ]
+; CHECK-NEXT:    [[C1A_062]] = getelementptr inbounds i8, i8* [[C1_061]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* [[C1_061]], align 1
+; CHECK-NEXT:    [[CONV7:%.*]] = zext i8 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, i8* [[C1A_062]], align 1
+; CHECK-NEXT:    [[CONV10:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[CONV10]], [[CONV7]]
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[ARRAYIDX13]], align 1
+; CHECK-NEXT:    [[CONV14:%.*]] = zext i8 [[TMP4]] to i32
+; CHECK-NEXT:    [[ADD15:%.*]] = add nuw nsw i32 [[ADD]], [[CONV14]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i8, i8* [[C2_058]], align 1
+; CHECK-NEXT:    [[CONV18:%.*]] = zext i8 [[TMP5]] to i32
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[ADD15]], [[CONV18]]
+; CHECK-NEXT:    [[CMP19:%.*]] = icmp slt i32 [[SUB]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw i32 0, [[SUB]]
+; CHECK-NEXT:    [[SELV_P:%.*]] = select i1 [[CMP19]], i32 [[TMP6]], i32 [[SUB]]
+; CHECK-NEXT:    [[SELV:%.*]] = add i32 [[SELV_P]], [[SUM_060]]
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, i8* [[C1_061]], i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, i8* [[ARRAYIDX_1]], align 1
+; CHECK-NEXT:    [[CONV7_1:%.*]] = zext i8 [[TMP7]] to i32
+; CHECK-NEXT:    [[ADD_1:%.*]] = add nuw nsw i32 [[CONV7_1]], [[CONV14]]
+; CHECK-NEXT:    [[ARRAYIDX13_1:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 2
+; CHECK-NEXT:    [[TMP8:%.*]] = load i8, i8* [[ARRAYIDX13_1]], align 1
+; CHECK-NEXT:    [[CONV14_1:%.*]] = zext i8 [[TMP8]] to i32
+; CHECK-NEXT:    [[ADD15_1:%.*]] = add nuw nsw i32 [[ADD_1]], [[CONV14_1]]
+; CHECK-NEXT:    [[ARRAYIDX17_1:%.*]] = getelementptr inbounds i8, i8* [[C2_058]], i64 1
+; CHECK-NEXT:    [[TMP9:%.*]] = load i8, i8* [[ARRAYIDX17_1]], align 1
+; CHECK-NEXT:    [[CONV18_1:%.*]] = zext i8 [[TMP9]] to i32
+; CHECK-NEXT:    [[SUB_1:%.*]] = sub nsw i32 [[ADD15_1]], [[CONV18_1]]
+; CHECK-NEXT:    [[CMP19_1:%.*]] = icmp slt i32 [[SUB_1]], 0
+; CHECK-NEXT:    [[TMP10:%.*]] = sub nsw i32 0, [[SUB_1]]
+; CHECK-NEXT:    [[SELV_P_1:%.*]] = select i1 [[CMP19_1]], i32 [[TMP10]], i32 [[SUB_1]]
+; CHECK-NEXT:    [[SELV_1:%.*]] = add i32 [[SELV]], [[SELV_P_1]]
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, i8* [[C1_061]], i64 2
+; CHECK-NEXT:    [[TMP11:%.*]] = load i8, i8* [[ARRAYIDX_2]], align 1
+; CHECK-NEXT:    [[CONV7_2:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-NEXT:    [[ADD_2:%.*]] = add nuw nsw i32 [[CONV7_2]], [[CONV14_1]]
+; CHECK-NEXT:    [[ARRAYIDX13_2:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 3
+; CHECK-NEXT:    [[TMP12:%.*]] = load i8, i8* [[ARRAYIDX13_2]], align 1
+; CHECK-NEXT:    [[CONV14_2:%.*]] = zext i8 [[TMP12]] to i32
+; CHECK-NEXT:    [[ADD15_2:%.*]] = add nuw nsw i32 [[ADD_2]], [[CONV14_2]]
+; CHECK-NEXT:    [[ARRAYIDX17_2:%.*]] = getelementptr inbounds i8, i8* [[C2_058]], i64 2
+; CHECK-NEXT:    [[TMP13:%.*]] = load i8, i8* [[ARRAYIDX17_2]], align 1
+; CHECK-NEXT:    [[CONV18_2:%.*]] = zext i8 [[TMP13]] to i32
+; CHECK-NEXT:    [[SUB_2:%.*]] = sub nsw i32 [[ADD15_2]], [[CONV18_2]]
+; CHECK-NEXT:    [[CMP19_2:%.*]] = icmp slt i32 [[SUB_2]], 0
+; CHECK-NEXT:    [[TMP14:%.*]] = sub nsw i32 0, [[SUB_2]]
+; CHECK-NEXT:    [[SELV_P_2:%.*]] = select i1 [[CMP19_2]], i32 [[TMP14]], i32 [[SUB_2]]
+; CHECK-NEXT:    [[SELV_2:%.*]] = add i32 [[SELV_1]], [[SELV_P_2]]
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, i8* [[C1_061]], i64 3
+; CHECK-NEXT:    [[TMP15:%.*]] = load i8, i8* [[ARRAYIDX_3]], align 1
+; CHECK-NEXT:    [[CONV7_3:%.*]] = zext i8 [[TMP15]] to i32
+; CHECK-NEXT:    [[ADD_3:%.*]] = add nuw nsw i32 [[CONV7_3]], [[CONV14_2]]
+; CHECK-NEXT:    [[ARRAYIDX13_3:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 4
+; CHECK-NEXT:    [[TMP16:%.*]] = load i8, i8* [[ARRAYIDX13_3]], align 1
+; CHECK-NEXT:    [[CONV14_3:%.*]] = zext i8 [[TMP16]] to i32
+; CHECK-NEXT:    [[ADD15_3:%.*]] = add nuw nsw i32 [[ADD_3]], [[CONV14_3]]
+; CHECK-NEXT:    [[ARRAYIDX17_3:%.*]] = getelementptr inbounds i8, i8* [[C2_058]], i64 3
+; CHECK-NEXT:    [[TMP17:%.*]] = load i8, i8* [[ARRAYIDX17_3]], align 1
+; CHECK-NEXT:    [[CONV18_3:%.*]] = zext i8 [[TMP17]] to i32
+; CHECK-NEXT:    [[SUB_3:%.*]] = sub nsw i32 [[ADD15_3]], [[CONV18_3]]
+; CHECK-NEXT:    [[CMP19_3:%.*]] = icmp slt i32 [[SUB_3]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = sub nsw i32 0, [[SUB_3]]
+; CHECK-NEXT:    [[SELV_P_3:%.*]] = select i1 [[CMP19_3]], i32 [[TMP18]], i32 [[SUB_3]]
+; CHECK-NEXT:    [[SELV_3:%.*]] = add i32 [[SELV_2]], [[SELV_P_3]]
+; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, i8* [[C1_061]], i64 4
+; CHECK-NEXT:    [[TMP19:%.*]] = load i8, i8* [[ARRAYIDX_4]], align 1
+; CHECK-NEXT:    [[CONV7_4:%.*]] = zext i8 [[TMP19]] to i32
+; CHECK-NEXT:    [[ADD_4:%.*]] = add nuw nsw i32 [[CONV7_4]], [[CONV14_3]]
+; CHECK-NEXT:    [[ARRAYIDX13_4:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 5
+; CHECK-NEXT:    [[TMP20:%.*]] = load i8, i8* [[ARRAYIDX13_4]], align 1
+; CHECK-NEXT:    [[CONV14_4:%.*]] = zext i8 [[TMP20]] to i32
+; CHECK-NEXT:    [[ADD15_4:%.*]] = add nuw nsw i32 [[ADD_4]], [[CONV14_4]]
+; CHECK-NEXT:    [[ARRAYIDX17_4:%.*]] = getelementptr inbounds i8, i8* [[C2_058]], i64 4
+; CHECK-NEXT:    [[TMP21:%.*]] = load i8, i8* [[ARRAYIDX17_4]], align 1
+; CHECK-NEXT:    [[CONV18_4:%.*]] = zext i8 [[TMP21]] to i32
+; CHECK-NEXT:    [[SUB_4:%.*]] = sub nsw i32 [[ADD15_4]], [[CONV18_4]]
+; CHECK-NEXT:    [[CMP19_4:%.*]] = icmp slt i32 [[SUB_4]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = sub nsw i32 0, [[SUB_4]]
+; CHECK-NEXT:    [[SELV_P_4:%.*]] = select i1 [[CMP19_4]], i32 [[TMP22]], i32 [[SUB_4]]
+; CHECK-NEXT:    [[SELV_4:%.*]] = add i32 [[SELV_3]], [[SELV_P_4]]
+; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, i8* [[C1_061]], i64 5
+; CHECK-NEXT:    [[TMP23:%.*]] = load i8, i8* [[ARRAYIDX_5]], align 1
+; CHECK-NEXT:    [[CONV7_5:%.*]] = zext i8 [[TMP23]] to i32
+; CHECK-NEXT:    [[ADD_5:%.*]] = add nuw nsw i32 [[CONV7_5]], [[CONV14_4]]
+; CHECK-NEXT:    [[ARRAYIDX13_5:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 6
+; CHECK-NEXT:    [[TMP24:%.*]] = load i8, i8* [[ARRAYIDX13_5]], align 1
+; CHECK-NEXT:    [[CONV14_5:%.*]] = zext i8 [[TMP24]] to i32
+; CHECK-NEXT:    [[ADD15_5:%.*]] = add nuw nsw i32 [[ADD_5]], [[CONV14_5]]
+; CHECK-NEXT:    [[ARRAYIDX17_5:%.*]] = getelementptr inbounds i8, i8* [[C2_058]], i64 5
+; CHECK-NEXT:    [[TMP25:%.*]] = load i8, i8* [[ARRAYIDX17_5]], align 1
+; CHECK-NEXT:    [[CONV18_5:%.*]] = zext i8 [[TMP25]] to i32
+; CHECK-NEXT:    [[SUB_5:%.*]] = sub nsw i32 [[ADD15_5]], [[CONV18_5]]
+; CHECK-NEXT:    [[CMP19_5:%.*]] = icmp slt i32 [[SUB_5]], 0
+; CHECK-NEXT:    [[TMP26:%.*]] = sub nsw i32 0, [[SUB_5]]
+; CHECK-NEXT:    [[SELV_P_5:%.*]] = select i1 [[CMP19_5]], i32 [[TMP26]], i32 [[SUB_5]]
+; CHECK-NEXT:    [[SELV_5:%.*]] = add i32 [[SELV_4]], [[SELV_P_5]]
+; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, i8* [[C1_061]], i64 6
+; CHECK-NEXT:    [[TMP27:%.*]] = load i8, i8* [[ARRAYIDX_6]], align 1
+; CHECK-NEXT:    [[CONV7_6:%.*]] = zext i8 [[TMP27]] to i32
+; CHECK-NEXT:    [[ADD_6:%.*]] = add nuw nsw i32 [[CONV7_6]], [[CONV14_5]]
+; CHECK-NEXT:    [[ARRAYIDX13_6:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 7
+; CHECK-NEXT:    [[TMP28:%.*]] = load i8, i8* [[ARRAYIDX13_6]], align 1
+; CHECK-NEXT:    [[CONV14_6:%.*]] = zext i8 [[TMP28]] to i32
+; CHECK-NEXT:    [[ADD15_6:%.*]] = add nuw nsw i32 [[ADD_6]], [[CONV14_6]]
+; CHECK-NEXT:    [[ARRAYIDX17_6:%.*]] = getelementptr inbounds i8, i8* [[C2_058]], i64 6
+; CHECK-NEXT:    [[TMP29:%.*]] = load i8, i8* [[ARRAYIDX17_6]], align 1
+; CHECK-NEXT:    [[CONV18_6:%.*]] = zext i8 [[TMP29]] to i32
+; CHECK-NEXT:    [[SUB_6:%.*]] = sub nsw i32 [[ADD15_6]], [[CONV18_6]]
+; CHECK-NEXT:    [[CMP19_6:%.*]] = icmp slt i32 [[SUB_6]], 0
+; CHECK-NEXT:    [[TMP30:%.*]] = sub nsw i32 0, [[SUB_6]]
+; CHECK-NEXT:    [[SELV_P_6:%.*]] = select i1 [[CMP19_6]], i32 [[TMP30]], i32 [[SUB_6]]
+; CHECK-NEXT:    [[SELV_6:%.*]] = add i32 [[SELV_5]], [[SELV_P_6]]
+; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, i8* [[C1_061]], i64 7
+; CHECK-NEXT:    [[TMP31:%.*]] = load i8, i8* [[ARRAYIDX_7]], align 1
+; CHECK-NEXT:    [[CONV7_7:%.*]] = zext i8 [[TMP31]] to i32
+; CHECK-NEXT:    [[ADD_7:%.*]] = add nuw nsw i32 [[CONV7_7]], [[CONV14_6]]
+; CHECK-NEXT:    [[ARRAYIDX13_7:%.*]] = getelementptr inbounds i8, i8* [[C1A_062]], i64 8
+; CHECK-NEXT:    [[TMP32:%.*]] = load i8, i8* [[ARRAYIDX13_7]], align 1
+; CHECK-NEXT:    [[CONV14_7:%.*]] = zext i8 [[TMP32]] to i32
+; CHECK-NEXT:    [[ADD15_7:%.*]] = add nuw nsw i32 [[ADD_7]], [[CONV14_7]]
+; CHECK-NEXT:    [[ARRAYIDX17_7:%.*]] = getelementptr inbounds i8, i8* [[C2_058]], i64 7
+; CHECK-NEXT:    [[TMP33:%.*]] = load i8, i8* [[ARRAYIDX17_7]], align 1
+; CHECK-NEXT:    [[CONV18_7:%.*]] = zext i8 [[TMP33]] to i32
+; CHECK-NEXT:    [[SUB_7:%.*]] = sub nsw i32 [[ADD15_7]], [[CONV18_7]]
+; CHECK-NEXT:    [[CMP19_7:%.*]] = icmp slt i32 [[SUB_7]], 0
+; CHECK-NEXT:    [[TMP34:%.*]] = sub nsw i32 0, [[SUB_7]]
+; CHECK-NEXT:    [[SELV_P_7:%.*]] = select i1 [[CMP19_7]], i32 [[TMP34]], i32 [[SUB_7]]
+; CHECK-NEXT:    [[SELV_7]] = add i32 [[SELV_6]], [[SELV_P_7]]
+; CHECK-NEXT:    [[ADD_PTR26]] = getelementptr inbounds i8, i8* [[C2_058]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[INC28]] = add nuw nsw i32 [[J_059]], 1
+; CHECK-NEXT:    [[EXITCOND63_NOT:%.*]] = icmp eq i32 [[INC28]], [[LOOP_MAX]]
+; CHECK-NEXT:    br i1 [[EXITCOND63_NOT]], label [[FOR_END29_LOOPEXIT:%.*]], label [[FOR_COND3_PREHEADER]]
+; CHECK:       for.end29.loopexit:
+; CHECK-NEXT:    [[SELV_7_LCSSA:%.*]] = phi i32 [ [[SELV_7]], [[FOR_COND3_PREHEADER]] ]
+; CHECK-NEXT:    br label [[FOR_END29]]
+; CHECK:       for.end29:
+; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[SELV_7_LCSSA]], [[FOR_END29_LOOPEXIT]] ]
+; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
+;
+entry:
+  %idx.ext = sext i32 %stride_x to i64
+  %cmp57 = icmp sgt i32 %loop_max, 0
+  br i1 %cmp57, label %for.cond3.preheader.preheader, label %for.end29
+
+for.cond3.preheader.preheader:                    ; preds = %entry
+  %conv1 = zext i8 %block_B to i64
+  %0 = inttoptr i64 %conv1 to i8*
+  %conv = zext i8 %block_A to i64
+  %1 = inttoptr i64 %conv to i8*
+  br label %for.cond3.preheader
+
+for.cond3.preheader:                              ; preds = %for.cond3.preheader.preheader, %for.cond3.preheader
+  %c1.061 = phi i8* [ %c1a.062, %for.cond3.preheader ], [ %1, %for.cond3.preheader.preheader ]
+  %sum.060 = phi i32 [ %selv.7, %for.cond3.preheader ], [ 0, %for.cond3.preheader.preheader ]
+  %j.059 = phi i32 [ %inc28, %for.cond3.preheader ], [ 0, %for.cond3.preheader.preheader ]
+  %c2.058 = phi i8* [ %add.ptr26, %for.cond3.preheader ], [ %0, %for.cond3.preheader.preheader ]
+  %c1a.062 = getelementptr inbounds i8, i8* %c1.061, i64 %idx.ext
+  %2 = load i8, i8* %c1.061, align 1
+  %conv7 = zext i8 %2 to i32
+  %3 = load i8, i8* %c1a.062, align 1
+  %conv10 = zext i8 %3 to i32
+  %add = add nuw nsw i32 %conv10, %conv7
+  %arrayidx13 = getelementptr inbounds i8, i8* %c1a.062, i64 1
+  %4 = load i8, i8* %arrayidx13, align 1
+  %conv14 = zext i8 %4 to i32
+  %add15 = add nuw nsw i32 %add, %conv14
+  %5 = load i8, i8* %c2.058, align 1
+  %conv18 = zext i8 %5 to i32
+  %sub = sub nsw i32 %add15, %conv18
+  %cmp19 = icmp slt i32 %sub, 0
+  %6 = sub nsw i32 0, %sub
+  %selv.p = select i1 %cmp19, i32 %6, i32 %sub
+  %selv = add i32 %selv.p, %sum.060
+  %arrayidx.1 = getelementptr inbounds i8, i8* %c1.061, i64 1
+  %7 = load i8, i8* %arrayidx.1, align 1
+  %conv7.1 = zext i8 %7 to i32
+  %add.1 = add nuw nsw i32 %conv7.1, %conv14
+  %arrayidx13.1 = getelementptr inbounds i8, i8* %c1a.062, i64 2
+  %8 = load i8, i8* %arrayidx13.1, align 1
+  %conv14.1 = zext i8 %8 to i32
+  %add15.1 = add nuw nsw i32 %add.1, %conv14.1
+  %arrayidx17.1 = getelementptr inbounds i8, i8* %c2.058, i64 1
+  %9 = load i8, i8* %arrayidx17.1, align 1
+  %conv18.1 = zext i8 %9 to i32
+  %sub.1 = sub nsw i32 %add15.1, %conv18.1
+  %cmp19.1 = icmp slt i32 %sub.1, 0
+  %10 = sub nsw i32 0, %sub.1
+  %selv.p.1 = select i1 %cmp19.1, i32 %10, i32 %sub.1
+  %selv.1 = add i32 %selv, %selv.p.1
+  %arrayidx.2 = getelementptr inbounds i8, i8* %c1.061, i64 2
+  %11 = load i8, i8* %arrayidx.2, align 1
+  %conv7.2 = zext i8 %11 to i32
+  %add.2 = add nuw nsw i32 %conv7.2, %conv14.1
+  %arrayidx13.2 = getelementptr inbounds i8, i8* %c1a.062, i64 3
+  %12 = load i8, i8* %arrayidx13.2, align 1
+  %conv14.2 = zext i8 %12 to i32
+  %add15.2 = add nuw nsw i32 %add.2, %conv14.2
+  %arrayidx17.2 = getelementptr inbounds i8, i8* %c2.058, i64 2
+  %13 = load i8, i8* %arrayidx17.2, align 1
+  %conv18.2 = zext i8 %13 to i32
+  %sub.2 = sub nsw i32 %add15.2, %conv18.2
+  %cmp19.2 = icmp slt i32 %sub.2, 0
+  %14 = sub nsw i32 0, %sub.2
+  %selv.p.2 = select i1 %cmp19.2, i32 %14, i32 %sub.2
+  %selv.2 = add i32 %selv.1, %selv.p.2
+  %arrayidx.3 = getelementptr inbounds i8, i8* %c1.061, i64 3
+  %15 = load i8, i8* %arrayidx.3, align 1
+  %conv7.3 = zext i8 %15 to i32
+  %add.3 = add nuw nsw i32 %conv7.3, %conv14.2
+  %arrayidx13.3 = getelementptr inbounds i8, i8* %c1a.062, i64 4
+  %16 = load i8, i8* %arrayidx13.3, align 1
+  %conv14.3 = zext i8 %16 to i32
+  %add15.3 = add nuw nsw i32 %add.3, %conv14.3
+  %arrayidx17.3 = getelementptr inbounds i8, i8* %c2.058, i64 3
+  %17 = load i8, i8* %arrayidx17.3, align 1
+  %conv18.3 = zext i8 %17 to i32
+  %sub.3 = sub nsw i32 %add15.3, %conv18.3
+  %cmp19.3 = icmp slt i32 %sub.3, 0
+  %18 = sub nsw i32 0, %sub.3
+  %selv.p.3 = select i1 %cmp19.3, i32 %18, i32 %sub.3
+  %selv.3 = add i32 %selv.2, %selv.p.3
+  %arrayidx.4 = getelementptr inbounds i8, i8* %c1.061, i64 4
+  %19 = load i8, i8* %arrayidx.4, align 1
+  %conv7.4 = zext i8 %19 to i32
+  %add.4 = add nuw nsw i32 %conv7.4, %conv14.3
+  %arrayidx13.4 = getelementptr inbounds i8, i8* %c1a.062, i64 5
+  %20 = load i8, i8* %arrayidx13.4, align 1
+  %conv14.4 = zext i8 %20 to i32
+  %add15.4 = add nuw nsw i32 %add.4, %conv14.4
+  %arrayidx17.4 = getelementptr inbounds i8, i8* %c2.058, i64 4
+  %21 = load i8, i8* %arrayidx17.4, align 1
+  %conv18.4 = zext i8 %21 to i32
+  %sub.4 = sub nsw i32 %add15.4, %conv18.4
+  %cmp19.4 = icmp slt i32 %sub.4, 0
+  %22 = sub nsw i32 0, %sub.4
+  %selv.p.4 = select i1 %cmp19.4, i32 %22, i32 %sub.4
+  %selv.4 = add i32 %selv.3, %selv.p.4
+  %arrayidx.5 = getelementptr inbounds i8, i8* %c1.061, i64 5
+  %23 = load i8, i8* %arrayidx.5, align 1
+  %conv7.5 = zext i8 %23 to i32
+  %add.5 = add nuw nsw i32 %conv7.5, %conv14.4
+  %arrayidx13.5 = getelementptr inbounds i8, i8* %c1a.062, i64 6
+  %24 = load i8, i8* %arrayidx13.5, align 1
+  %conv14.5 = zext i8 %24 to i32
+  %add15.5 = add nuw nsw i32 %add.5, %conv14.5
+  %arrayidx17.5 = getelementptr inbounds i8, i8* %c2.058, i64 5
+  %25 = load i8, i8* %arrayidx17.5, align 1
+  %conv18.5 = zext i8 %25 to i32
+  %sub.5 = sub nsw i32 %add15.5, %conv18.5
+  %cmp19.5 = icmp slt i32 %sub.5, 0
+  %26 = sub nsw i32 0, %sub.5
+  %selv.p.5 = select i1 %cmp19.5, i32 %26, i32 %sub.5
+  %selv.5 = add i32 %selv.4, %selv.p.5
+  %arrayidx.6 = getelementptr inbounds i8, i8* %c1.061, i64 6
+  %27 = load i8, i8* %arrayidx.6, align 1
+  %conv7.6 = zext i8 %27 to i32
+  %add.6 = add nuw nsw i32 %conv7.6, %conv14.5
+  %arrayidx13.6 = getelementptr inbounds i8, i8* %c1a.062, i64 7
+  %28 = load i8, i8* %arrayidx13.6, align 1
+  %conv14.6 = zext i8 %28 to i32
+  %add15.6 = add nuw nsw i32 %add.6, %conv14.6
+  %arrayidx17.6 = getelementptr inbounds i8, i8* %c2.058, i64 6
+  %29 = load i8, i8* %arrayidx17.6, align 1
+  %conv18.6 = zext i8 %29 to i32
+  %sub.6 = sub nsw i32 %add15.6, %conv18.6
+  %cmp19.6 = icmp slt i32 %sub.6, 0
+  %30 = sub nsw i32 0, %sub.6
+  %selv.p.6 = select i1 %cmp19.6, i32 %30, i32 %sub.6
+  %selv.6 = add i32 %selv.5, %selv.p.6
+  %arrayidx.7 = getelementptr inbounds i8, i8* %c1.061, i64 7
+  %31 = load i8, i8* %arrayidx.7, align 1
+  %conv7.7 = zext i8 %31 to i32
+  %add.7 = add nuw nsw i32 %conv7.7, %conv14.6
+  %arrayidx13.7 = getelementptr inbounds i8, i8* %c1a.062, i64 8
+  %32 = load i8, i8* %arrayidx13.7, align 1
+  %conv14.7 = zext i8 %32 to i32
+  %add15.7 = add nuw nsw i32 %add.7, %conv14.7
+  %arrayidx17.7 = getelementptr inbounds i8, i8* %c2.058, i64 7
+  %33 = load i8, i8* %arrayidx17.7, align 1
+  %conv18.7 = zext i8 %33 to i32
+  %sub.7 = sub nsw i32 %add15.7, %conv18.7
+  %cmp19.7 = icmp slt i32 %sub.7, 0
+  %34 = sub nsw i32 0, %sub.7
+  %selv.p.7 = select i1 %cmp19.7, i32 %34, i32 %sub.7
+  %selv.7 = add i32 %selv.6, %selv.p.7
+  %add.ptr26 = getelementptr inbounds i8, i8* %c2.058, i64 %idx.ext
+  %inc28 = add nuw nsw i32 %j.059, 1
+  %exitcond63.not = icmp eq i32 %inc28, %loop_max
+  br i1 %exitcond63.not, label %for.end29.loopexit, label %for.cond3.preheader
+
+for.end29.loopexit:                               ; preds = %for.cond3.preheader
+  %selv.7.lcssa = phi i32 [ %selv.7, %for.cond3.preheader ]
+  br label %for.end29
+
+for.end29:                                        ; preds = %for.end29.loopexit, %entry
+  %sum.0.lcssa = phi i32 [ 0, %entry ], [ %selv.7.lcssa, %for.end29.loopexit ]
+  ret i32 %sum.0.lcssa
+}