diff --git a/llvm/lib/CodeGen/LiveVariables.cpp b/llvm/lib/CodeGen/LiveVariables.cpp
--- a/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/llvm/lib/CodeGen/LiveVariables.cpp
@@ -828,7 +828,8 @@
          BBE = SuccBB->end(); BBI != BBE && BBI->isPHI(); ++BBI) {
     for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
-      if (BBI->getOperand(i + 1).getMBB() == BB)
+      if (BBI->getOperand(i + 1).getMBB() == BB &&
+          BBI->getOperand(i).readsReg())
         getVarInfo(BBI->getOperand(i).getReg())
           .AliveBlocks.set(NumNew);
   }
diff --git a/llvm/test/CodeGen/AArch64/PHIElimination-crash.mir b/llvm/test/CodeGen/AArch64/PHIElimination-crash.mir
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/PHIElimination-crash.mir
@@ -0,0 +1,428 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s \
+# RUN:   -run-pass=processimpdefs,livevars,machine-loops,phi-node-elimination,twoaddressinstruction
+
+# Don't crash.
+
+--- |
+  ; ModuleID = 'shrink-wrap.ll'
+  source_filename = "shrink-wrap.ll"
+  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64-unknown-linux-gnu"
+
+  %type1 = type { i32, i32, i32 }
+
+  @g1 = external unnamed_addr global i32, align 4
+  @g2 = external unnamed_addr global i1
+  @g3 = external unnamed_addr global [144 x i32], align 4
+  @g4 = external unnamed_addr constant [144 x i32], align 4
+  @g5 = external unnamed_addr constant [144 x i32], align 4
+  @g6 = external unnamed_addr constant [144 x i32], align 4
+  @g7 = external unnamed_addr constant [144 x i32], align 4
+  @g8 = external unnamed_addr constant [144 x i32], align 4
+  @g9 = external unnamed_addr constant [144 x i32], align 4
+  @g10 = external unnamed_addr constant [144 x i32], align 4
+  @g11 = external unnamed_addr global i32, align 4
+  @g12 = external unnamed_addr global [144 x [144 x i8]], align 1
+  @g13 = external unnamed_addr global %type1*, align 8
+  @g14 = external unnamed_addr global [144 x [144 x i8]], align 1
+  @g15 = external unnamed_addr global [144 x [144 x i8]], align 1
+  @g16 = external unnamed_addr global [144 x [144 x i8]], align 1
+  @g17 = external unnamed_addr global [62 x i32], align 4
+  @g18 = external unnamed_addr global i32, align 4
+  @g19 = external unnamed_addr constant [144 x i32], align 4
+  @g20 = external unnamed_addr global [144 x [144 x i8]], align 1
+  @g21 = external unnamed_addr global i32, align 4
+
+  declare fastcc i32 @foo()
+
+  declare fastcc i32 @bar()
+
+  define internal fastcc i32 @func(i32 %alpha, i32 %beta) {
+  entry:
+    %v1 = alloca [2 x [11 x i32]], align 4
+    %v2 = alloca [11 x i32], align 16
+    %v3 = alloca [11 x i32], align 16
+    switch i32 undef, label %if.end.9 [
+      i32 4, label %if.then.6
+      i32 3, label %if.then.2
+    ]
+
+  if.then.2: ; preds = %entry
+    %call3 = tail call fastcc i32 @bar()
+    ret i32 %call3
+
+  if.then.6: ; preds = %entry
+    %call7 = tail call fastcc i32 @foo()
+    unreachable
+
+  if.end.9: ; preds = %entry
+    %tmp = load i32, i32* @g1, align 4
+    %rem.i = urem i32 %tmp, 1000000
+    %idxprom.1.i = zext i32 %rem.i to i64
+    %tmp1 = load %type1*, %type1** @g13, align 8
+    %v4 = getelementptr inbounds %type1, %type1* %tmp1, i64 %idxprom.1.i, i32 0
+    %.b = load i1, i1* @g2, align 1
+    %v5 = select i1 %.b, i32 2, i32 0
+    %tmp2 = load i32, i32* @g18, align 4
+    %tmp3 = load i32, i32* @g11, align 4
+    %idxprom58 = sext i32 %tmp3 to i64
+    %tmp4 = load i32, i32* @g21, align 4
+    %idxprom69 = sext i32 %tmp4 to i64
+    br label %for.body
+
+  for.body: ; preds = %for.inc, %if.end.9
+    %v6 = phi i32 [ 0, %if.end.9 ], [ %v7, %for.inc ]
+    %a.0983 = phi i32 [ 1, %if.end.9 ], [ %a.1, %for.inc ]
+    %arrayidx = getelementptr inbounds [62 x i32], [62 x i32]* @g17, i64 0, i64 undef
+    %tmp5 = load i32, i32* %arrayidx, align 4
+    %idxprom53 = sext i32 %tmp5 to i64
+    br i1 undef, label %for.inc, label %if.else.51
+
+  if.else.51: ; preds = %for.body
+    %arrayidx54 = getelementptr inbounds [144 x i32], [144 x i32]* @g3, i64 0, i64 %idxprom53
+    %tmp6 = load i32, i32* %arrayidx54, align 4
+    switch i32 %tmp6, label %for.inc [
+      i32 1, label %block.bb
+      i32 10, label %block.bb.159
+      i32 7, label %block.bb.75
+      i32 8, label %block.bb.87
+      i32 9, label %block.bb.147
+      i32 12, label %block.bb.111
+      i32 3, label %block.bb.123
+      i32 4, label %block.bb.135
+    ]
+
+  block.bb: ; preds = %if.else.51
+    %arrayidx56 = getelementptr inbounds [144 x i32], [144 x i32]* @g6, i64 0, i64 %idxprom53
+    %tmp7 = load i32, i32* %arrayidx56, align 4
+    %shr = ashr i32 %tmp7, %v5
+    %add57 = add nsw i32 %shr, 0
+    %arrayidx61 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g14, i64 0, i64 %idxprom53, i64 %idxprom58
+    %tmp8 = load i8, i8* %arrayidx61, align 1
+    %conv = zext i8 %tmp8 to i32
+    %add62 = add nsw i32 %conv, %add57
+    br label %for.inc
+
+  block.bb.75: ; preds = %if.else.51
+    %arrayidx78 = getelementptr inbounds [144 x i32], [144 x i32]* @g10, i64 0, i64 %idxprom53
+    %tmp9 = load i32, i32* %arrayidx78, align 4
+    %shr79 = ashr i32 %tmp9, %v5
+    %add80 = add nsw i32 %shr79, 0
+    %add86 = add nsw i32 0, %add80
+    br label %for.inc
+
+  block.bb.87: ; preds = %if.else.51
+    %arrayidx90 = getelementptr inbounds [144 x i32], [144 x i32]* @g9, i64 0, i64 %idxprom53
+    %tmp10 = load i32, i32* %arrayidx90, align 4
+    %shr91 = ashr i32 %tmp10, 0
+    %sub92 = sub nsw i32 0, %shr91
+    %arrayidx96 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g15, i64 0, i64 %idxprom53, i64 %idxprom69
+    %tmp11 = load i8, i8* %arrayidx96, align 1
+    %conv97 = zext i8 %tmp11 to i32
+    %sub98 = sub nsw i32 %sub92, %conv97
+    br label %for.inc
+
+  block.bb.111: ; preds = %if.else.51
+    %arrayidx114 = getelementptr inbounds [144 x i32], [144 x i32]* @g19, i64 0, i64 %idxprom53
+    %tmp12 = load i32, i32* %arrayidx114, align 4
+    %shr115 = ashr i32 %tmp12, 0
+    %sub116 = sub nsw i32 0, %shr115
+    %arrayidx120 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g12, i64 0, i64 %idxprom53, i64 %idxprom69
+    %tmp13 = load i8, i8* %arrayidx120, align 1
+    %conv121 = zext i8 %tmp13 to i32
+    %sub122 = sub nsw i32 %sub116, %conv121
+    br label %for.inc
+
+  block.bb.123: ; preds = %if.else.51
+    %arrayidx126 = getelementptr inbounds [144 x i32], [144 x i32]* @g5, i64 0, i64 %idxprom53
+    %tmp14 = load i32, i32* %arrayidx126, align 4
+    %shr127 = ashr i32 %tmp14, %v5
+    %add128 = add nsw i32 %shr127, 0
+    %add134 = add nsw i32 0, %add128
+    br label %for.inc
+
+  block.bb.135: ; preds = %if.else.51
+    %arrayidx138 = getelementptr inbounds [144 x i32], [144 x i32]* @g4, i64 0, i64 %idxprom53
+    %tmp15 = load i32, i32* %arrayidx138, align 4
+    %shr139 = ashr i32 %tmp15, 0
+    %sub140 = sub nsw i32 0, %shr139
+    %arrayidx144 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g20, i64 0, i64 %idxprom53, i64 %idxprom69
+    %tmp16 = load i8, i8* %arrayidx144, align 1
+    %conv145 = zext i8 %tmp16 to i32
+    %sub146 = sub nsw i32 %sub140, %conv145
+    br label %for.inc
+
+  block.bb.147: ; preds = %if.else.51
+    %arrayidx150 = getelementptr inbounds [144 x i32], [144 x i32]* @g8, i64 0, i64 %idxprom53
+    %tmp17 = load i32, i32* %arrayidx150, align 4
+    %shr151 = ashr i32 %tmp17, %v5
+    %add152 = add nsw i32 %shr151, 0
+    %arrayidx156 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g16, i64 0, i64 %idxprom53, i64 %idxprom58
+    %tmp18 = load i8, i8* %arrayidx156, align 1
+    %conv157 = zext i8 %tmp18 to i32
+    %add158 = add nsw i32 %conv157, %add152
+    br label %for.inc
+
+  block.bb.159: ; preds = %if.else.51
+    %sub160 = add nsw i32 %v6, -450
+    %arrayidx162 = getelementptr inbounds [144 x i32], [144 x i32]* @g7, i64 0, i64 %idxprom53
+    %tmp19 = load i32, i32* %arrayidx162, align 4
+    %shr163 = ashr i32 %tmp19, 0
+    %sub164 = sub nsw i32 %sub160, %shr163
+    %sub170 = sub nsw i32 %sub164, 0
+    br label %for.inc
+
+  for.inc: ; preds = %block.bb.159, %block.bb.147, %block.bb.135, %block.bb.123, %block.bb.111, %block.bb.87, %block.bb.75, %block.bb, %if.else.51, %for.body
+    %v7 = phi i32 [ %v6, %for.body ], [ %v6, %if.else.51 ], [ %sub170, %block.bb.159 ], [ %add158, %block.bb.147 ], [ %sub146, %block.bb.135 ], [ %add134, %block.bb.123 ], [ %sub122, %block.bb.111 ], [ %sub98, %block.bb.87 ], [ %add86, %block.bb.75 ], [ %add62, %block.bb ]
+    %a.1 = phi i32 [ %a.0983, %for.body ], [ undef, %if.else.51 ], [ undef, %block.bb.159 ], [ undef, %block.bb.147 ], [ undef, %block.bb.135 ], [ undef, %block.bb.123 ], [ undef, %block.bb.111 ], [ undef, %block.bb.87 ], [ undef, %block.bb.75 ], [ undef, %block.bb ]
+    %cmp48 = icmp sgt i32 %a.1, %tmp2
+    br i1 %cmp48, label %for.end, label %for.body
+
+  for.end: ; preds = %for.inc
+    store i32 %tmp, i32* %v4, align 4
+    %hold_hash.i.7 = getelementptr inbounds %type1, %type1* %tmp1, i64 %idxprom.1.i, i32 1
+    store i32 0, i32* %hold_hash.i.7, align 4
+    ret i32 undef
+  }
+
+  ; Function Attrs: nounwind
+  declare void @llvm.stackprotector(i8*, i8**) #0
+
+  attributes #0 = { nounwind }
+
+...
+---
+name: func
+alignment: 4
+tracksRegLiveness: true
+liveins: []
+jumpTable:
+  kind: block-address
+  entries:
+    - id: 0
+      blocks: [ '%bb.6', '%bb.14', '%bb.10', '%bb.11', '%bb.14',
+                '%bb.14', '%bb.7', '%bb.8', '%bb.12', '%bb.13',
+                '%bb.14', '%bb.9' ]
+body: |
+  bb.0.entry:
+    successors: %bb.1(0x3fffffff), %bb.16(0x40000001)
+
+    %23:gpr32 = COPY $wzr
+    CBNZW %23, %bb.1
+    B %bb.16
+
+  bb.16.entry:
+    successors: %bb.2(0x00000002), %bb.3(0x7ffffffe)
+
+    %24:gpr32 = COPY $wzr
+    CBNZW %24, %bb.2
+    B %bb.3
+
+  bb.1.if.then.2:
+    TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp
+
+  bb.2.if.then.6:
+    successors:
+
+    ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+    BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $w0
+    ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+
+  bb.3.if.end.9:
+    successors: %bb.4(0x80000000)
+
+    %28:gpr64common = ADRP target-flags(aarch64-page) @g1
+    %29:gpr32 = LDRWui killed %28, target-flags(aarch64-pageoff, aarch64-nc) @g1 :: (dereferenceable load 4 from @g1)
+    %30:gpr64 = SUBREG_TO_REG 0, killed %29, %subreg.sub_32
+    %0:gpr32 = COPY %30.sub_32
+    %31:gpr32 = MOVi32imm 1125899907
+    %32:gpr64 = SUBREG_TO_REG 0, killed %31, %subreg.sub_32
+    %33:gpr64 = MADDXrrr %30, killed %32, $xzr
+    %34:gpr64 = UBFMXri killed %33, 50, 63
+    %35:gpr32 = COPY %34.sub_32
+    %36:gpr32 = MOVi32imm 1000000
+    %38:gpr32 = MSUBWrrr killed %35, killed %36, %0
+    %1:gpr64 = SUBREG_TO_REG 0, %38, %subreg.sub_32
+    %39:gpr64common = ADRP target-flags(aarch64-page) @g13
+    %40:gpr64 = LDRXui killed %39, target-flags(aarch64-pageoff, aarch64-nc) @g13 :: (dereferenceable load 8 from @g13)
+    %41:gpr32 = MOVi32imm 12
+    %42:gpr64 = UMADDLrrr %38, killed %41, %40
+    %3:gpr64sp = COPY %42
+    %43:gpr64common = ADRP target-flags(aarch64-page) @g2
+    %44:gpr32common = LDRBBui killed %43, target-flags(aarch64-pageoff, aarch64-nc) @g2 :: (dereferenceable load 1 from @g2)
+    dead $wzr = SUBSWri killed %44, 0, 0, implicit-def $nzcv
+    %46:gpr32 = COPY $wzr
+    %47:gpr32 = MOVi32imm 2
+    %48:gpr32 = CSELWr killed %47, %46, 1, implicit $nzcv
+    %49:gpr64common = ADRP target-flags(aarch64-page) @g18
+    %50:gpr32 = LDRWui killed %49, target-flags(aarch64-pageoff, aarch64-nc) @g18 :: (dereferenceable load 4 from @g18)
+    %51:gpr64common = ADRP target-flags(aarch64-page) @g11
+    %52:gpr64 = LDRSWui killed %51, target-flags(aarch64-pageoff, aarch64-nc) @g11 :: (dereferenceable load 4 from @g11)
+    %6:gpr64 = COPY %52
+    %53:gpr64common = ADRP target-flags(aarch64-page) @g21
+    %54:gpr64 = LDRSWui killed %53, target-flags(aarch64-pageoff, aarch64-nc) @g21 :: (dereferenceable load 4 from @g21)
+    %55:gpr32 = MOVi32imm 1
+    %27:gpr32all = COPY %55
+    %26:gpr32all = COPY %46
+    %7:gpr64 = COPY %54
+    %56:gpr64common = ADRP target-flags(aarch64-page) @g17
+    %61:gpr64common = MOVaddr target-flags(aarch64-page) @g3, target-flags(aarch64-pageoff, aarch64-nc) @g3
+    %59:gpr32all = IMPLICIT_DEF
+    %65:gpr64 = MOVaddrJT target-flags(aarch64-page) %jump-table.0, target-flags(aarch64-pageoff, aarch64-nc) %jump-table.0
+    %129:gpr64common = MOVaddr target-flags(aarch64-page) @g6, target-flags(aarch64-pageoff, aarch64-nc) @g6
+    %132:gpr32 = MOVi32imm 144
+    %133:gpr64 = SUBREG_TO_REG 0, %132, %subreg.sub_32
+    %135:gpr64 = MOVaddr target-flags(aarch64-page) @g14, target-flags(aarch64-pageoff, aarch64-nc) @g14
+    %128:gpr32all = IMPLICIT_DEF
+    %81:gpr64common = MOVaddr target-flags(aarch64-page) @g5, target-flags(aarch64-pageoff, aarch64-nc) @g5
+    %80:gpr32all = IMPLICIT_DEF
+    %69:gpr64common = MOVaddr target-flags(aarch64-page) @g4, target-flags(aarch64-pageoff, aarch64-nc) @g4
+    %74:gpr64 = MOVaddr target-flags(aarch64-page) @g20, target-flags(aarch64-pageoff, aarch64-nc) @g20
+    %68:gpr32all = IMPLICIT_DEF
+    %120:gpr64common = MOVaddr target-flags(aarch64-page) @g10, target-flags(aarch64-pageoff, aarch64-nc) @g10
+    %119:gpr32all = IMPLICIT_DEF
+    %108:gpr64common = MOVaddr target-flags(aarch64-page) @g9, target-flags(aarch64-pageoff, aarch64-nc) @g9
+    %113:gpr64 = MOVaddr target-flags(aarch64-page) @g15, target-flags(aarch64-pageoff, aarch64-nc) @g15
+    %107:gpr32all = IMPLICIT_DEF
+    %97:gpr64common = MOVaddr target-flags(aarch64-page) @g8, target-flags(aarch64-pageoff, aarch64-nc) @g8
+    %103:gpr64 = MOVaddr target-flags(aarch64-page) @g16, target-flags(aarch64-pageoff, aarch64-nc) @g16
+    %96:gpr32all = IMPLICIT_DEF
+    %124:gpr64common = MOVaddr target-flags(aarch64-page) @g7, target-flags(aarch64-pageoff, aarch64-nc) @g7
+    %123:gpr32all = IMPLICIT_DEF
+    %85:gpr64common = MOVaddr target-flags(aarch64-page) @g19, target-flags(aarch64-pageoff, aarch64-nc) @g19
+    %90:gpr64 = MOVaddr target-flags(aarch64-page) @g12, target-flags(aarch64-pageoff, aarch64-nc) @g12
+    %84:gpr32all = IMPLICIT_DEF
+
+  bb.4.for.body:
+    successors: %bb.14(0x40000000), %bb.5(0x40000000)
+
+    %8:gpr32 = PHI %26, %bb.3, %19, %bb.14
+    %9:gpr32all = PHI %27, %bb.3, %20, %bb.14
+    %58:gpr32 = COPY $wzr
+    CBNZW %58, %bb.14
+    B %bb.5
+
+  bb.5.if.else.51:
+    successors: %bb.14(0x071c71c7), %bb.17(0x78e38e39)
+
+    %57:gpr64 = LDRSWui %56, target-flags(aarch64-pageoff, aarch64-nc) @g17 :: (load 4 from %ir.arrayidx)
+    %10:gpr64 = COPY %57
+    %62:gpr32common = LDRWroX %61, %10, 0, 1 :: (load 4 from %ir.arrayidx54)
+    %63:gpr32common = SUBWri killed %62, 1, 0
+    dead $wzr = SUBSWri %63, 11, 0, implicit-def $nzcv
+    Bcc 8, %bb.14, implicit $nzcv
+
+  bb.17.if.else.51:
+    successors: %bb.6(0x0f0f0f0f), %bb.14(0x07878788), %bb.10(0x0f0f0f0f), %bb.11(0x0f0f0f0f), %bb.7(0x0f0f0f0f), %bb.8(0x0f0f0f0f), %bb.12(0x0f0f0f0f), %bb.13(0x0f0f0f0f), %bb.9(0x0f0f0f0f)
+
+    %60:gpr64 = SUBREG_TO_REG 0, %63, %subreg.sub_32
+    early-clobber %66:gpr64, early-clobber %67:gpr64sp = JumpTableDest32 %65, %60, %jump-table.0
+    BR killed %66
+
+  bb.6.block.bb:
+    successors: %bb.14(0x80000000)
+
+    %130:gpr32 = LDRWroX %129, %57, 0, 1 :: (load 4 from %ir.arrayidx56)
+    %131:gpr32 = ASRVWr killed %130, %48
+    %136:gpr64common = MADDXrrr %57, %133, %135
+    %137:gpr32 = LDRBBroX killed %136, %6, 0, 0 :: (load 1 from %ir.arrayidx61)
+    %138:gpr32 = nsw ADDWrr killed %137, killed %131
+    %11:gpr32all = COPY %138
+    B %bb.14
+
+  bb.7.block.bb.75:
+    successors: %bb.14(0x80000000)
+
+    %121:gpr32 = LDRWroX %120, %57, 0, 1 :: (load 4 from %ir.arrayidx78)
+    %122:gpr32 = ASRVWr killed %121, %48
+    %12:gpr32all = COPY %122
+    B %bb.14
+
+  bb.8.block.bb.87:
+    successors: %bb.14(0x80000000)
+
+    %109:gpr32 = LDRWroX %108, %57, 0, 1 :: (load 4 from %ir.arrayidx90)
+    %111:gpr64 = SUBREG_TO_REG 0, %132, %subreg.sub_32
+    %114:gpr64common = MADDXrrr %57, killed %111, %113
+    %115:gpr32 = LDRBBroX killed %114, %7, 0, 0 :: (load 1 from %ir.arrayidx96)
+    %116:gpr32 = ADDWrr killed %109, killed %115
+    %117:gpr32 = COPY $wzr
+    %118:gpr32 = SUBWrr %117, killed %116
+    %13:gpr32all = COPY %118
+    B %bb.14
+
+  bb.9.block.bb.111:
+    successors: %bb.14(0x80000000)
+
+    %86:gpr32 = LDRWroX %85, %57, 0, 1 :: (load 4 from %ir.arrayidx114)
+    %88:gpr64 = SUBREG_TO_REG 0, %132, %subreg.sub_32
+    %91:gpr64common = MADDXrrr %57, killed %88, %90
+    %92:gpr32 = LDRBBroX killed %91, %7, 0, 0 :: (load 1 from %ir.arrayidx120)
+    %93:gpr32 = ADDWrr killed %86, killed %92
+    %94:gpr32 = COPY $wzr
+    %95:gpr32 = SUBWrr %94, killed %93
+    %14:gpr32all = COPY %95
+    B %bb.14
+
+  bb.10.block.bb.123:
+    successors: %bb.14(0x80000000)
+
+    %82:gpr32 = LDRWroX %81, %57, 0, 1 :: (load 4 from %ir.arrayidx126)
+    %83:gpr32 = ASRVWr killed %82, %48
+    %15:gpr32all = COPY %83
+    B %bb.14
+
+  bb.11.block.bb.135:
+    successors: %bb.14(0x80000000)
+
+    %70:gpr32 = LDRWroX %69, %57, 0, 1 :: (load 4 from %ir.arrayidx138)
+    %72:gpr64 = SUBREG_TO_REG 0, %132, %subreg.sub_32
+    %75:gpr64common = MADDXrrr %57, killed %72, %74
+    %76:gpr32 = LDRBBroX killed %75, %7, 0, 0 :: (load 1 from %ir.arrayidx144)
+    %77:gpr32 = ADDWrr killed %70, killed %76
+    %78:gpr32 = COPY $wzr
+    %79:gpr32 = SUBWrr %78, killed %77
+    %16:gpr32all = COPY %79
+    B %bb.14
+
+  bb.12.block.bb.147:
+    successors: %bb.14(0x80000000)
+
+    %98:gpr32 = LDRWroX %97, %57, 0, 1 :: (load 4 from %ir.arrayidx150)
+    %99:gpr32 = ASRVWr killed %98, %48
+    %101:gpr64 = SUBREG_TO_REG 0, %132, %subreg.sub_32
+    %104:gpr64common = MADDXrrr %57, killed %101, %103
+    %105:gpr32 = LDRBBroX killed %104, %6, 0, 0 :: (load 1 from %ir.arrayidx156)
+    %106:gpr32 = nsw ADDWrr killed %105, killed %99
+    %17:gpr32all = COPY %106
+    B %bb.14
+
+  bb.13.block.bb.159:
+    successors: %bb.14(0x80000000)
+
+    %125:gpr32 = LDRWroX %124, %57, 0, 1 :: (load 4 from %ir.arrayidx162)
+    %126:gpr32common = SUBWrr %8, killed %125
+    %127:gpr32common = SUBWri killed %126, 450, 0
+    %18:gpr32all = COPY %127
+
+  bb.14.for.inc:
+    successors: %bb.15(0x04000000), %bb.4(0x7c000000)
+
+    %19:gpr32all = PHI %8, %bb.4, %8, %bb.5, %8, %bb.5, %8, %bb.17, %16, %bb.11, %15, %bb.10, %14, %bb.9, %17, %bb.12, %13, %bb.8, %12, %bb.7, %18, %bb.13, %11, %bb.6
+    %20:gpr32 = PHI %9, %bb.4, %59, %bb.5, %59, %bb.5, %59, %bb.17, %68, %bb.11, %80, %bb.10, %84, %bb.9, %96, %bb.12, %107, %bb.8, %119, %bb.7, %123, %bb.13, %128, %bb.6
+    dead $wzr = SUBSWrr %20, %50, implicit-def $nzcv
+    Bcc 13, %bb.4, implicit $nzcv
+    B %bb.15
+
+  bb.15.for.end:
+    STRWui %0, %3, 0 :: (store 4 into %ir.v4)
+    %140:gpr32 = MOVi32imm 12
+    %141:gpr64 = SUBREG_TO_REG 0, killed %140, %subreg.sub_32
+    %143:gpr64common = MADDXrrr %1, killed %141, %40
+    %144:gpr32 = COPY $wzr
+    STRWui %144, killed %143, 1 :: (store 4 into %ir.hold_hash.i.7)
+    %145:gpr32all = IMPLICIT_DEF
+    $w0 = COPY %145
+    RET_ReallyLR implicit $w0
+
+...