diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -8820,13 +8820,10 @@ EVT SetCCType = getSetCCResultType( DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); SDValue SetCC; - if (IsAdd && isOneConstant(RHS)) { - // Special case: uaddo X, 1 overflowed if X+1 is 0. This potential reduces - // the live range of X. We assume comparing with 0 is cheap. - // TODO: This generalizes to (X + C) < C. - SetCC = - DAG.getSetCC(dl, SetCCType, Result, - DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ); + if (IsAdd && isa<ConstantSDNode>(RHS)) { + // Special case: uaddo X, C overflowed if X+C < C. This potentially reduces + // the live range of X. + SetCC = DAG.getSetCC(dl, SetCCType, Result, RHS, ISD::SETULT); } else { ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT; SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -6969,11 +6969,10 @@ DAG.getValueType(MVT::i32)); SDValue Overflow; - if (IsAdd && isOneConstant(RHS)) { - // Special case uaddo X, 1 overflowed if the addition result is 0. - // FIXME: We can do this for any constant RHS by using (X + C) < C. - Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, - DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ); + if (IsAdd && isa<ConstantSDNode>(RHS)) { + // Special case uaddo X, C overflowed if the addition result is less than + // C. + Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, RHS, ISD::SETULT); } else { // Sign extend the LHS and perform an unsigned compare with the ADDW // result.
Since the inputs are sign extended from i32, this is equivalent diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -390,31 +390,29 @@ ; RV32-LABEL: uaddo.i32.constant: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, -2 -; RV32-NEXT: sltu a0, a2, a0 +; RV32-NEXT: sltiu a0, a2, -2 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32.constant: ; RV64: # %bb.0: # %entry -; RV64-NEXT: sext.w a2, a0 -; RV64-NEXT: addiw a3, a0, -2 -; RV64-NEXT: sltu a0, a3, a2 -; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: addiw a2, a0, -2 +; RV64-NEXT: sltiu a0, a2, -2 +; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32.constant: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, -2 -; RV32ZBA-NEXT: sltu a0, a2, a0 +; RV32ZBA-NEXT: sltiu a0, a2, -2 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32.constant: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: sext.w a2, a0 -; RV64ZBA-NEXT: addiw a3, a0, -2 -; RV64ZBA-NEXT: sltu a0, a3, a2 -; RV64ZBA-NEXT: sw a3, 0(a1) +; RV64ZBA-NEXT: addiw a2, a0, -2 +; RV64ZBA-NEXT: sltiu a0, a2, -2 +; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2) @@ -510,6 +508,56 @@ ret i1 %obit } +define zeroext i1 @uaddo.i64.constant(i64 %v1, i64* %res) { +; RV32-LABEL: uaddo.i64.constant: +; RV32: # %bb.0: # %entry +; RV32-NEXT: mv a3, a0 +; RV32-NEXT: addi a4, a0, 2 +; RV32-NEXT: sltu a0, a4, a0 +; RV32-NEXT: add a5, a1, a0 +; RV32-NEXT: bgeu a4, a3, .LBB11_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: sltu a0, a5, a1 +; RV32-NEXT: .LBB11_2: # %entry +; RV32-NEXT: sw a4, 0(a2) +; RV32-NEXT: sw a5, 4(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: uaddo.i64.constant: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a2, a0, 2 +; RV64-NEXT: sltiu a0, a2, 2 +; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: ret +; +; RV32ZBA-LABEL: uaddo.i64.constant: +; RV32ZBA: # %bb.0: # %entry +; RV32ZBA-NEXT: mv a3, a0 +; RV32ZBA-NEXT: addi a4, a0, 2 +; RV32ZBA-NEXT: sltu a0, a4, a0 +; RV32ZBA-NEXT: add a5, a1, a0 +; RV32ZBA-NEXT: bgeu a4, a3, .LBB11_2 +; RV32ZBA-NEXT: # %bb.1: # %entry +; RV32ZBA-NEXT: sltu a0, a5, a1 +; RV32ZBA-NEXT: .LBB11_2: # %entry +; RV32ZBA-NEXT: sw a4, 0(a2) +; RV32ZBA-NEXT: sw a5, 4(a2) +; RV32ZBA-NEXT: ret +; +; RV64ZBA-LABEL: uaddo.i64.constant: +; RV64ZBA: # %bb.0: # %entry +; RV64ZBA-NEXT: addi a2, a0, 2 +; RV64ZBA-NEXT: sltiu a0, a2, 2 +; RV64ZBA-NEXT: sd a2, 0(a1) +; RV64ZBA-NEXT: ret +entry: + %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2) + %val = extractvalue {i64, i1} %t, 0 + %obit = extractvalue {i64, i1} %t, 1 + store i64 %val, i64* %res + ret i1 %obit +} + define zeroext i1 @uaddo.i64.constant_one(i64 %v1, i64* %res) { ; RV32-LABEL: uaddo.i64.constant_one: ; RV32: # %bb.0: # %entry @@ -517,10 +565,10 @@ ; RV32-NEXT: addi a4, a0, 1 ; RV32-NEXT: sltu a0, a4, a0 ; RV32-NEXT: add a5, a1, a0 -; RV32-NEXT: bgeu a4, a3, .LBB11_2 +; RV32-NEXT: bgeu a4, a3, .LBB12_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a5, a1 -; RV32-NEXT: .LBB11_2: # %entry +; RV32-NEXT: .LBB12_2: # %entry ; RV32-NEXT: sw a4, 0(a2) ; RV32-NEXT: sw a5, 4(a2) ; RV32-NEXT: ret @@ -538,10 +586,10 @@ ; RV32ZBA-NEXT: addi a4, a0, 1 ; RV32ZBA-NEXT: sltu a0, a4, a0 ; RV32ZBA-NEXT: add a5, a1, a0 -; RV32ZBA-NEXT: bgeu a4, a3, .LBB11_2 +; RV32ZBA-NEXT: bgeu a4, a3, .LBB12_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a5, a1 -; RV32ZBA-NEXT: 
.LBB11_2: # %entry +; RV32ZBA-NEXT: .LBB12_2: # %entry ; RV32ZBA-NEXT: sw a4, 0(a2) ; RV32ZBA-NEXT: sw a5, 4(a2) ; RV32ZBA-NEXT: ret @@ -833,13 +881,13 @@ ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a5 ; RV32-NEXT: sub a2, a0, a2 -; RV32-NEXT: beq a3, a1, .LBB18_2 +; RV32-NEXT: beq a3, a1, .LBB19_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 -; RV32-NEXT: j .LBB18_3 -; RV32-NEXT: .LBB18_2: +; RV32-NEXT: j .LBB19_3 +; RV32-NEXT: .LBB19_2: ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: .LBB18_3: # %entry +; RV32-NEXT: .LBB19_3: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) ; RV32-NEXT: ret @@ -857,13 +905,13 @@ ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a5 ; RV32ZBA-NEXT: sub a2, a0, a2 -; RV32ZBA-NEXT: beq a3, a1, .LBB18_2 +; RV32ZBA-NEXT: beq a3, a1, .LBB19_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 -; RV32ZBA-NEXT: j .LBB18_3 -; RV32ZBA-NEXT: .LBB18_2: +; RV32ZBA-NEXT: j .LBB19_3 +; RV32ZBA-NEXT: .LBB19_2: ; RV32ZBA-NEXT: sltu a0, a0, a2 -; RV32ZBA-NEXT: .LBB18_3: # %entry +; RV32ZBA-NEXT: .LBB19_3: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) ; RV32ZBA-NEXT: ret @@ -1504,10 +1552,10 @@ ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: slt a2, a2, a0 ; RV32-NEXT: slti a3, a1, 0 -; RV32-NEXT: bne a3, a2, .LBB28_2 +; RV32-NEXT: bne a3, a2, .LBB29_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB28_2: # %entry +; RV32-NEXT: .LBB29_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i32: @@ -1516,10 +1564,10 @@ ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: add a4, a3, a2 ; RV64-NEXT: addw a2, a3, a2 -; RV64-NEXT: bne a2, a4, .LBB28_2 +; RV64-NEXT: bne a2, a4, .LBB29_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB28_2: # %entry +; RV64-NEXT: .LBB29_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i32: @@ -1527,10 +1575,10 @@ ; RV32ZBA-NEXT: add a2, a0, a1 ; RV32ZBA-NEXT: slt a2, a2, a0 ; RV32ZBA-NEXT: slti a3, a1, 0 -; RV32ZBA-NEXT: bne a3, a2, .LBB28_2 +; RV32ZBA-NEXT: bne a3, a2, .LBB29_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB28_2: # %entry +; RV32ZBA-NEXT: .LBB29_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i32: @@ -1539,10 +1587,10 @@ ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: add a4, a3, a2 ; RV64ZBA-NEXT: addw a2, a3, a2 -; RV64ZBA-NEXT: bne a2, a4, .LBB28_2 +; RV64ZBA-NEXT: bne a2, a4, .LBB29_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB28_2: # %entry +; RV64ZBA-NEXT: .LBB29_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1607,11 +1655,11 @@ ; RV32-NEXT: xor a5, a1, a3 ; RV32-NEXT: not a5, a5 ; RV32-NEXT: and a4, a5, a4 -; RV32-NEXT: bltz a4, .LBB30_2 +; RV32-NEXT: bltz a4, .LBB31_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB30_2: # %entry +; RV32-NEXT: .LBB31_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i64: @@ -1619,10 +1667,10 @@ ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: slt a2, a2, a0 ; RV64-NEXT: slti a3, a1, 0 -; RV64-NEXT: bne a3, a2, .LBB30_2 +; RV64-NEXT: bne a3, a2, .LBB31_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB30_2: # %entry +; RV64-NEXT: .LBB31_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i64: @@ -1635,11 +1683,11 @@ ; RV32ZBA-NEXT: xor a5, a1, a3 ; RV32ZBA-NEXT: not a5, a5 ; RV32ZBA-NEXT: and a4, a5, a4 -; RV32ZBA-NEXT: bltz a4, .LBB30_2 +; 
RV32ZBA-NEXT: bltz a4, .LBB31_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB30_2: # %entry +; RV32ZBA-NEXT: .LBB31_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i64: @@ -1647,10 +1695,10 @@ ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: slt a2, a2, a0 ; RV64ZBA-NEXT: slti a3, a1, 0 -; RV64ZBA-NEXT: bne a3, a2, .LBB30_2 +; RV64ZBA-NEXT: bne a3, a2, .LBB31_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB30_2: # %entry +; RV64ZBA-NEXT: .LBB31_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1716,39 +1764,39 @@ ; RV32-LABEL: uaddo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a2, a0, a1 -; RV32-NEXT: bltu a2, a0, .LBB32_2 +; RV32-NEXT: bltu a2, a0, .LBB33_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB32_2: # %entry +; RV32-NEXT: .LBB33_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a2, a0, a1 ; RV64-NEXT: sext.w a3, a0 -; RV64-NEXT: bltu a2, a3, .LBB32_2 +; RV64-NEXT: bltu a2, a3, .LBB33_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB32_2: # %entry +; RV64-NEXT: .LBB33_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a2, a0, a1 -; RV32ZBA-NEXT: bltu a2, a0, .LBB32_2 +; RV32ZBA-NEXT: bltu a2, a0, .LBB33_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB32_2: # %entry +; RV32ZBA-NEXT: .LBB33_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a2, a0, a1 ; RV64ZBA-NEXT: sext.w a3, a0 -; RV64ZBA-NEXT: bltu a2, a3, .LBB32_2 +; RV64ZBA-NEXT: bltu a2, a3, .LBB33_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB32_2: # %entry +; RV64ZBA-NEXT: .LBB33_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1801,15 +1849,15 @@ ; RV32-NEXT: add a4, a0, a2 ; RV32-NEXT: sltu a4, a4, a0 ; RV32-NEXT: add a5, a5, a4 -; RV32-NEXT: bne a5, a1, .LBB34_3 +; RV32-NEXT: bne a5, a1, .LBB35_3 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: beqz a4, .LBB34_4 -; RV32-NEXT: .LBB34_2: # %entry +; RV32-NEXT: beqz a4, .LBB35_4 +; RV32-NEXT: .LBB35_2: # %entry ; RV32-NEXT: ret -; RV32-NEXT: .LBB34_3: # %entry +; RV32-NEXT: .LBB35_3: # %entry ; RV32-NEXT: sltu a4, a5, a1 -; RV32-NEXT: bnez a4, .LBB34_2 -; RV32-NEXT: .LBB34_4: # %entry +; RV32-NEXT: bnez a4, .LBB35_2 +; RV32-NEXT: .LBB35_4: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: ret @@ -1817,10 +1865,10 @@ ; RV64-LABEL: uaddo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a2, a0, a1 -; RV64-NEXT: bltu a2, a0, .LBB34_2 +; RV64-NEXT: bltu a2, a0, .LBB35_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB34_2: # %entry +; RV64-NEXT: .LBB35_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i64: @@ -1829,15 +1877,15 @@ ; RV32ZBA-NEXT: add a4, a0, a2 ; RV32ZBA-NEXT: sltu a4, a4, a0 ; RV32ZBA-NEXT: add a5, a5, a4 -; RV32ZBA-NEXT: bne a5, a1, .LBB34_3 +; RV32ZBA-NEXT: bne a5, a1, .LBB35_3 ; RV32ZBA-NEXT: # %bb.1: # %entry -; RV32ZBA-NEXT: beqz a4, .LBB34_4 -; RV32ZBA-NEXT: .LBB34_2: # %entry +; RV32ZBA-NEXT: beqz a4, .LBB35_4 +; RV32ZBA-NEXT: .LBB35_2: # %entry ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB34_3: # %entry +; RV32ZBA-NEXT: .LBB35_3: # %entry ; RV32ZBA-NEXT: sltu 
a4, a5, a1 -; RV32ZBA-NEXT: bnez a4, .LBB34_2 -; RV32ZBA-NEXT: .LBB34_4: # %entry +; RV32ZBA-NEXT: bnez a4, .LBB35_2 +; RV32ZBA-NEXT: .LBB35_4: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: ret @@ -1845,10 +1893,10 @@ ; RV64ZBA-LABEL: uaddo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a2, a0, a1 -; RV64ZBA-NEXT: bltu a2, a0, .LBB34_2 +; RV64ZBA-NEXT: bltu a2, a0, .LBB35_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB34_2: # %entry +; RV64ZBA-NEXT: .LBB35_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1864,10 +1912,10 @@ ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a3, a0 -; RV32-NEXT: beq a2, a1, .LBB35_2 +; RV32-NEXT: beq a2, a1, .LBB36_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 -; RV32-NEXT: .LBB35_2: # %entry +; RV32-NEXT: .LBB36_2: # %entry ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; @@ -1884,10 +1932,10 @@ ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a3, a0 -; RV32ZBA-NEXT: beq a2, a1, .LBB35_2 +; RV32ZBA-NEXT: beq a2, a1, .LBB36_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 -; RV32ZBA-NEXT: .LBB35_2: # %entry +; RV32ZBA-NEXT: .LBB36_2: # %entry ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; @@ -1910,10 +1958,10 @@ ; RV32-NEXT: sgtz a2, a1 ; RV32-NEXT: sub a3, a0, a1 ; RV32-NEXT: slt a3, a3, a0 -; RV32-NEXT: bne a2, a3, .LBB36_2 +; RV32-NEXT: bne a2, a3, .LBB37_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB36_2: # %entry +; RV32-NEXT: .LBB37_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i32: @@ -1922,10 +1970,10 @@ ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: sub a4, a3, a2 ; RV64-NEXT: subw a2, a3, a2 -; RV64-NEXT: bne a2, a4, .LBB36_2 +; RV64-NEXT: bne a2, a4, .LBB37_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB36_2: # %entry +; RV64-NEXT: .LBB37_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i32: @@ -1933,10 +1981,10 @@ ; RV32ZBA-NEXT: sgtz a2, a1 ; RV32ZBA-NEXT: sub a3, a0, a1 ; RV32ZBA-NEXT: slt a3, a3, a0 -; RV32ZBA-NEXT: bne a2, a3, .LBB36_2 +; RV32ZBA-NEXT: bne a2, a3, .LBB37_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB36_2: # %entry +; RV32ZBA-NEXT: .LBB37_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i32: @@ -1945,10 +1993,10 @@ ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: sub a4, a3, a2 ; RV64ZBA-NEXT: subw a2, a3, a2 -; RV64ZBA-NEXT: bne a2, a4, .LBB36_2 +; RV64ZBA-NEXT: bne a2, a4, .LBB37_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB36_2: # %entry +; RV64ZBA-NEXT: .LBB37_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) @@ -2011,11 +2059,11 @@ ; RV32-NEXT: xor a4, a1, a4 ; RV32-NEXT: xor a5, a1, a3 ; RV32-NEXT: and a4, a5, a4 -; RV32-NEXT: bltz a4, .LBB38_2 +; RV32-NEXT: bltz a4, .LBB39_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB38_2: # %entry +; RV32-NEXT: .LBB39_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i64: @@ -2023,10 +2071,10 @@ ; RV64-NEXT: sgtz a2, a1 ; RV64-NEXT: sub a3, a0, a1 ; RV64-NEXT: slt a3, a3, a0 -; RV64-NEXT: bne a2, a3, .LBB38_2 +; RV64-NEXT: bne a2, a3, .LBB39_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB38_2: # %entry +; RV64-NEXT: .LBB39_2: # %entry ; RV64-NEXT: 
ret ; ; RV32ZBA-LABEL: ssubo.select.i64: @@ -2037,11 +2085,11 @@ ; RV32ZBA-NEXT: xor a4, a1, a4 ; RV32ZBA-NEXT: xor a5, a1, a3 ; RV32ZBA-NEXT: and a4, a5, a4 -; RV32ZBA-NEXT: bltz a4, .LBB38_2 +; RV32ZBA-NEXT: bltz a4, .LBB39_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB38_2: # %entry +; RV32ZBA-NEXT: .LBB39_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i64: @@ -2049,10 +2097,10 @@ ; RV64ZBA-NEXT: sgtz a2, a1 ; RV64ZBA-NEXT: sub a3, a0, a1 ; RV64ZBA-NEXT: slt a3, a3, a0 -; RV64ZBA-NEXT: bne a2, a3, .LBB38_2 +; RV64ZBA-NEXT: bne a2, a3, .LBB39_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB38_2: # %entry +; RV64ZBA-NEXT: .LBB39_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) @@ -2114,39 +2162,39 @@ ; RV32-LABEL: usubo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a2, a0, a1 -; RV32-NEXT: bltu a0, a2, .LBB40_2 +; RV32-NEXT: bltu a0, a2, .LBB41_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB40_2: # %entry +; RV32-NEXT: .LBB41_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a2, a0, a1 ; RV64-NEXT: sext.w a3, a0 -; RV64-NEXT: bltu a3, a2, .LBB40_2 +; RV64-NEXT: bltu a3, a2, .LBB41_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB40_2: # %entry +; RV64-NEXT: .LBB41_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a2, a0, a1 -; RV32ZBA-NEXT: bltu a0, a2, .LBB40_2 +; RV32ZBA-NEXT: bltu a0, a2, .LBB41_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB40_2: # %entry +; RV32ZBA-NEXT: .LBB41_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a2, a0, a1 ; RV64ZBA-NEXT: sext.w a3, a0 -; RV64ZBA-NEXT: bltu a3, a2, .LBB40_2 +; RV64ZBA-NEXT: bltu a3, a2, .LBB41_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB40_2: # %entry +; RV64ZBA-NEXT: .LBB41_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) @@ -2198,28 +2246,28 @@ ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a5, a1, a3 ; RV32-NEXT: sub a4, a5, a4 -; RV32-NEXT: beq a4, a1, .LBB42_2 +; RV32-NEXT: beq a4, a1, .LBB43_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a4, a1, a4 -; RV32-NEXT: beqz a4, .LBB42_3 -; RV32-NEXT: j .LBB42_4 -; RV32-NEXT: .LBB42_2: +; RV32-NEXT: beqz a4, .LBB43_3 +; RV32-NEXT: j .LBB43_4 +; RV32-NEXT: .LBB43_2: ; RV32-NEXT: sub a4, a0, a2 ; RV32-NEXT: sltu a4, a0, a4 -; RV32-NEXT: bnez a4, .LBB42_4 -; RV32-NEXT: .LBB42_3: # %entry +; RV32-NEXT: bnez a4, .LBB43_4 +; RV32-NEXT: .LBB43_3: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB42_4: # %entry +; RV32-NEXT: .LBB43_4: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a2, a0, a1 -; RV64-NEXT: bltu a0, a2, .LBB42_2 +; RV64-NEXT: bltu a0, a2, .LBB43_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB42_2: # %entry +; RV64-NEXT: .LBB43_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i64: @@ -2227,28 +2275,28 @@ ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a5, a1, a3 ; RV32ZBA-NEXT: sub a4, a5, a4 -; RV32ZBA-NEXT: beq a4, a1, .LBB42_2 +; RV32ZBA-NEXT: beq a4, a1, .LBB43_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; 
RV32ZBA-NEXT: sltu a4, a1, a4 -; RV32ZBA-NEXT: beqz a4, .LBB42_3 -; RV32ZBA-NEXT: j .LBB42_4 -; RV32ZBA-NEXT: .LBB42_2: +; RV32ZBA-NEXT: beqz a4, .LBB43_3 +; RV32ZBA-NEXT: j .LBB43_4 +; RV32ZBA-NEXT: .LBB43_2: ; RV32ZBA-NEXT: sub a4, a0, a2 ; RV32ZBA-NEXT: sltu a4, a0, a4 -; RV32ZBA-NEXT: bnez a4, .LBB42_4 -; RV32ZBA-NEXT: .LBB42_3: # %entry +; RV32ZBA-NEXT: bnez a4, .LBB43_4 +; RV32ZBA-NEXT: .LBB43_3: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB42_4: # %entry +; RV32ZBA-NEXT: .LBB43_4: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a2, a0, a1 -; RV64ZBA-NEXT: bltu a0, a2, .LBB42_2 +; RV64ZBA-NEXT: bltu a0, a2, .LBB43_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB42_2: # %entry +; RV64ZBA-NEXT: .LBB43_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) @@ -2263,12 +2311,12 @@ ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a4 -; RV32-NEXT: beq a3, a1, .LBB43_2 +; RV32-NEXT: beq a3, a1, .LBB44_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret -; RV32-NEXT: .LBB43_2: +; RV32-NEXT: .LBB44_2: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 @@ -2286,12 +2334,12 @@ ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a4 -; RV32ZBA-NEXT: beq a3, a1, .LBB43_2 +; RV32ZBA-NEXT: beq a3, a1, .LBB44_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB43_2: +; RV32ZBA-NEXT: .LBB44_2: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 @@ -2316,10 +2364,10 @@ ; RV32-NEXT: mulh a2, a0, a1 ; RV32-NEXT: mul a3, a0, a1 ; RV32-NEXT: srai a3, a3, 31 -; RV32-NEXT: bne a2, a3, .LBB44_2 +; RV32-NEXT: bne a2, a3, .LBB45_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB44_2: # %entry +; RV32-NEXT: .LBB45_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i32: @@ -2328,10 +2376,10 @@ ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: mul a4, a3, a2 ; RV64-NEXT: mulw a2, a3, a2 -; RV64-NEXT: bne a2, a4, .LBB44_2 +; RV64-NEXT: bne a2, a4, .LBB45_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB44_2: # %entry +; RV64-NEXT: .LBB45_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i32: @@ -2339,10 +2387,10 @@ ; RV32ZBA-NEXT: mulh a2, a0, a1 ; RV32ZBA-NEXT: mul a3, a0, a1 ; RV32ZBA-NEXT: srai a3, a3, 31 -; RV32ZBA-NEXT: bne a2, a3, .LBB44_2 +; RV32ZBA-NEXT: bne a2, a3, .LBB45_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB44_2: # %entry +; RV32ZBA-NEXT: .LBB45_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.select.i32: @@ -2351,10 +2399,10 @@ ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: mul a4, a3, a2 ; RV64ZBA-NEXT: mulw a2, a3, a2 -; RV64ZBA-NEXT: bne a2, a4, .LBB44_2 +; RV64ZBA-NEXT: bne a2, a4, .LBB45_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB44_2: # %entry +; RV64ZBA-NEXT: .LBB45_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2458,11 +2506,11 @@ ; RV32-NEXT: xor a5, a5, a4 ; RV32-NEXT: xor a4, t6, a4 ; RV32-NEXT: or a4, a4, a5 -; RV32-NEXT: bnez a4, .LBB46_2 +; RV32-NEXT: bnez a4, .LBB47_2 ; RV32-NEXT: # %bb.1: # %entry ; 
RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB46_2: # %entry +; RV32-NEXT: .LBB47_2: # %entry ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2472,10 +2520,10 @@ ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a3, a0, a1 ; RV64-NEXT: srai a3, a3, 63 -; RV64-NEXT: bne a2, a3, .LBB46_2 +; RV64-NEXT: bne a2, a3, .LBB47_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB46_2: # %entry +; RV64-NEXT: .LBB47_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i64: @@ -2527,11 +2575,11 @@ ; RV32ZBA-NEXT: xor a5, a5, a4 ; RV32ZBA-NEXT: xor a4, t6, a4 ; RV32ZBA-NEXT: or a4, a4, a5 -; RV32ZBA-NEXT: bnez a4, .LBB46_2 +; RV32ZBA-NEXT: bnez a4, .LBB47_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB46_2: # %entry +; RV32ZBA-NEXT: .LBB47_2: # %entry ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret @@ -2541,10 +2589,10 @@ ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a3, a0, a1 ; RV64ZBA-NEXT: srai a3, a3, 63 -; RV64ZBA-NEXT: bne a2, a3, .LBB46_2 +; RV64ZBA-NEXT: bne a2, a3, .LBB47_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB46_2: # %entry +; RV64ZBA-NEXT: .LBB47_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2690,10 +2738,10 @@ ; RV32-LABEL: umulo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a2, a0, a1 -; RV32-NEXT: bnez a2, .LBB48_2 +; RV32-NEXT: bnez a2, .LBB49_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB48_2: # %entry +; RV32-NEXT: .LBB49_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i32: @@ -2702,19 +2750,19 @@ ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: mulhu a2, a3, a2 ; RV64-NEXT: srli a2, a2, 32 -; RV64-NEXT: bnez a2, .LBB48_2 +; RV64-NEXT: bnez a2, .LBB49_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB48_2: # %entry +; RV64-NEXT: .LBB49_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a2, a0, a1 -; RV32ZBA-NEXT: bnez a2, .LBB48_2 +; RV32ZBA-NEXT: bnez a2, .LBB49_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB48_2: # %entry +; RV32ZBA-NEXT: .LBB49_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i32: @@ -2723,10 +2771,10 @@ ; RV64ZBA-NEXT: zext.w a3, a0 ; RV64ZBA-NEXT: mul a2, a3, a2 ; RV64ZBA-NEXT: srli a2, a2, 32 -; RV64ZBA-NEXT: bnez a2, .LBB48_2 +; RV64ZBA-NEXT: bnez a2, .LBB49_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB48_2: # %entry +; RV64ZBA-NEXT: .LBB49_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2791,20 +2839,20 @@ ; RV32-NEXT: snez a6, a6 ; RV32-NEXT: or a5, a5, a6 ; RV32-NEXT: or a4, a5, a4 -; RV32-NEXT: bnez a4, .LBB50_2 +; RV32-NEXT: bnez a4, .LBB51_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB50_2: # %entry +; RV32-NEXT: .LBB51_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a2, a0, a1 -; RV64-NEXT: bnez a2, .LBB50_2 +; RV64-NEXT: bnez a2, .LBB51_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB50_2: # %entry +; RV64-NEXT: .LBB51_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i64: @@ -2825,20 +2873,20 @@ ; 
RV32ZBA-NEXT: snez a6, a6 ; RV32ZBA-NEXT: or a5, a5, a6 ; RV32ZBA-NEXT: or a4, a5, a4 -; RV32ZBA-NEXT: bnez a4, .LBB50_2 +; RV32ZBA-NEXT: bnez a4, .LBB51_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB50_2: # %entry +; RV32ZBA-NEXT: .LBB51_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a2, a0, a1 -; RV64ZBA-NEXT: bnez a2, .LBB50_2 +; RV64ZBA-NEXT: bnez a2, .LBB51_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB50_2: # %entry +; RV64ZBA-NEXT: .LBB51_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2918,11 +2966,11 @@ ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: slti a1, a1, 0 -; RV32-NEXT: beq a1, a0, .LBB52_2 +; RV32-NEXT: beq a1, a0, .LBB53_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB52_2: # %continue +; RV32-NEXT: .LBB53_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -2932,11 +2980,11 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: addw a0, a0, a1 -; RV64-NEXT: beq a0, a2, .LBB52_2 +; RV64-NEXT: beq a0, a2, .LBB53_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB52_2: # %continue +; RV64-NEXT: .LBB53_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -2945,11 +2993,11 @@ ; RV32ZBA-NEXT: add a2, a0, a1 ; RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: slti a1, a1, 0 -; RV32ZBA-NEXT: beq a1, a0, .LBB52_2 +; RV32ZBA-NEXT: beq a1, a0, .LBB53_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB52_2: # %continue +; RV32ZBA-NEXT: .LBB53_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -2959,11 +3007,11 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: addw a0, a0, a1 -; RV64ZBA-NEXT: beq a0, a2, .LBB52_2 +; RV64ZBA-NEXT: beq a0, a2, .LBB53_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB52_2: # %continue +; RV64ZBA-NEXT: .LBB53_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -2990,11 +3038,11 @@ ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: not a1, a1 ; RV32-NEXT: and a0, a1, a0 -; RV32-NEXT: bgez a0, .LBB53_2 +; RV32-NEXT: bgez a0, .LBB54_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB53_2: # %continue +; RV32-NEXT: .LBB54_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3003,11 +3051,11 @@ ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: slti a1, a1, 0 -; RV64-NEXT: beq a1, a0, .LBB53_2 +; RV64-NEXT: beq a1, a0, .LBB54_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB53_2: # %continue +; RV64-NEXT: .LBB54_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3021,11 +3069,11 @@ ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: not a1, a1 ; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: bgez a0, .LBB53_2 +; RV32ZBA-NEXT: bgez a0, .LBB54_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB53_2: # %continue +; RV32ZBA-NEXT: .LBB54_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3034,11 +3082,11 @@ ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: slt a0, a2, a0 ; RV64ZBA-NEXT: slti a1, a1, 0 -; RV64ZBA-NEXT: beq a1, a0, .LBB53_2 +; RV64ZBA-NEXT: beq a1, a0, 
.LBB54_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB53_2: # %continue +; RV64ZBA-NEXT: .LBB54_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3058,11 +3106,11 @@ ; RV32-LABEL: uaddo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a1, a0, a1 -; RV32-NEXT: bgeu a1, a0, .LBB54_2 +; RV32-NEXT: bgeu a1, a0, .LBB55_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB54_2: # %continue +; RV32-NEXT: .LBB55_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3070,22 +3118,22 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 -; RV64-NEXT: bgeu a1, a0, .LBB54_2 +; RV64-NEXT: bgeu a1, a0, .LBB55_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB54_2: # %continue +; RV64-NEXT: .LBB55_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a1, a0, a1 -; RV32ZBA-NEXT: bgeu a1, a0, .LBB54_2 +; RV32ZBA-NEXT: bgeu a1, a0, .LBB55_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB54_2: # %continue +; RV32ZBA-NEXT: .LBB55_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3093,11 +3141,11 @@ ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 -; RV64ZBA-NEXT: bgeu a1, a0, .LBB54_2 +; RV64ZBA-NEXT: bgeu a1, a0, .LBB55_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB54_2: # %continue +; RV64ZBA-NEXT: .LBB55_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3120,26 +3168,26 @@ ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a3, a0 -; RV32-NEXT: beq a2, a1, .LBB55_2 +; RV32-NEXT: beq a2, a1, .LBB56_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 -; RV32-NEXT: .LBB55_2: # %entry -; RV32-NEXT: beqz a0, .LBB55_4 +; RV32-NEXT: .LBB56_2: # %entry +; RV32-NEXT: beqz a0, .LBB56_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB55_4: # %continue +; RV32-NEXT: .LBB56_4: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a1 -; RV64-NEXT: bgeu a1, a0, .LBB55_2 +; RV64-NEXT: bgeu a1, a0, .LBB56_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB55_2: # %continue +; RV64-NEXT: .LBB56_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3149,26 +3197,26 @@ ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a3, a0 -; RV32ZBA-NEXT: beq a2, a1, .LBB55_2 +; RV32ZBA-NEXT: beq a2, a1, .LBB56_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 -; RV32ZBA-NEXT: .LBB55_2: # %entry -; RV32ZBA-NEXT: beqz a0, .LBB55_4 +; RV32ZBA-NEXT: .LBB56_2: # %entry +; RV32ZBA-NEXT: beqz a0, .LBB56_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB55_4: # %continue +; RV32ZBA-NEXT: .LBB56_4: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a1 -; RV64ZBA-NEXT: bgeu a1, a0, .LBB55_2 +; RV64ZBA-NEXT: bgeu a1, a0, .LBB56_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB55_2: # %continue +; RV64ZBA-NEXT: .LBB56_2: # 
%continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3190,11 +3238,11 @@ ; RV32-NEXT: sgtz a2, a1 ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: slt a0, a1, a0 -; RV32-NEXT: beq a2, a0, .LBB56_2 +; RV32-NEXT: beq a2, a0, .LBB57_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB56_2: # %continue +; RV32-NEXT: .LBB57_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3204,11 +3252,11 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sub a2, a0, a1 ; RV64-NEXT: subw a0, a0, a1 -; RV64-NEXT: beq a0, a2, .LBB56_2 +; RV64-NEXT: beq a0, a2, .LBB57_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB56_2: # %continue +; RV64-NEXT: .LBB57_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3217,11 +3265,11 @@ ; RV32ZBA-NEXT: sgtz a2, a1 ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: slt a0, a1, a0 -; RV32ZBA-NEXT: beq a2, a0, .LBB56_2 +; RV32ZBA-NEXT: beq a2, a0, .LBB57_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB56_2: # %continue +; RV32ZBA-NEXT: .LBB57_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3231,11 +3279,11 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sub a2, a0, a1 ; RV64ZBA-NEXT: subw a0, a0, a1 -; RV64ZBA-NEXT: beq a0, a2, .LBB56_2 +; RV64ZBA-NEXT: beq a0, a2, .LBB57_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB56_2: # %continue +; RV64ZBA-NEXT: .LBB57_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3260,11 +3308,11 @@ ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: and a0, a1, a0 -; RV32-NEXT: bgez a0, .LBB57_2 +; RV32-NEXT: bgez a0, .LBB58_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB57_2: # %continue +; RV32-NEXT: .LBB58_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3273,11 +3321,11 @@ ; RV64-NEXT: sgtz a2, a1 ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: slt a0, a1, a0 -; RV64-NEXT: beq a2, a0, .LBB57_2 +; RV64-NEXT: beq a2, a0, .LBB58_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB57_2: # %continue +; RV64-NEXT: .LBB58_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3289,11 +3337,11 @@ ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: bgez a0, .LBB57_2 +; RV32ZBA-NEXT: bgez a0, .LBB58_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB57_2: # %continue +; RV32ZBA-NEXT: .LBB58_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3302,11 +3350,11 @@ ; RV64ZBA-NEXT: sgtz a2, a1 ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: slt a0, a1, a0 -; RV64ZBA-NEXT: beq a2, a0, .LBB57_2 +; RV64ZBA-NEXT: beq a2, a0, .LBB58_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB57_2: # %continue +; RV64ZBA-NEXT: .LBB58_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3326,11 +3374,11 @@ ; RV32-LABEL: usubo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a1, a0, a1 -; RV32-NEXT: bgeu a0, a1, .LBB58_2 +; RV32-NEXT: bgeu a0, a1, .LBB59_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB58_2: # %continue +; RV32-NEXT: .LBB59_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3338,22 +3386,22 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a1, a0, a1 ; 
RV64-NEXT: sext.w a0, a0 -; RV64-NEXT: bgeu a0, a1, .LBB58_2 +; RV64-NEXT: bgeu a0, a1, .LBB59_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB58_2: # %continue +; RV64-NEXT: .LBB59_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a1, a0, a1 -; RV32ZBA-NEXT: bgeu a0, a1, .LBB58_2 +; RV32ZBA-NEXT: bgeu a0, a1, .LBB59_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB58_2: # %continue +; RV32ZBA-NEXT: .LBB59_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3361,11 +3409,11 @@ ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 -; RV64ZBA-NEXT: bgeu a0, a1, .LBB58_2 +; RV64ZBA-NEXT: bgeu a0, a1, .LBB59_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB58_2: # %continue +; RV64ZBA-NEXT: .LBB59_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3387,29 +3435,29 @@ ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a4 -; RV32-NEXT: beq a3, a1, .LBB59_3 +; RV32-NEXT: beq a3, a1, .LBB60_3 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 -; RV32-NEXT: bnez a0, .LBB59_4 -; RV32-NEXT: .LBB59_2: # %continue +; RV32-NEXT: bnez a0, .LBB60_4 +; RV32-NEXT: .LBB60_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret -; RV32-NEXT: .LBB59_3: +; RV32-NEXT: .LBB60_3: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 -; RV32-NEXT: beqz a0, .LBB59_2 -; RV32-NEXT: .LBB59_4: # %overflow +; RV32-NEXT: beqz a0, .LBB60_2 +; RV32-NEXT: .LBB60_4: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a1, a0, a1 -; RV64-NEXT: bgeu a0, a1, .LBB59_2 +; RV64-NEXT: bgeu a0, a1, .LBB60_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB59_2: # %continue +; RV64-NEXT: .LBB60_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3418,29 +3466,29 @@ ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a4 -; RV32ZBA-NEXT: beq a3, a1, .LBB59_3 +; RV32ZBA-NEXT: beq a3, a1, .LBB60_3 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 -; RV32ZBA-NEXT: bnez a0, .LBB59_4 -; RV32ZBA-NEXT: .LBB59_2: # %continue +; RV32ZBA-NEXT: bnez a0, .LBB60_4 +; RV32ZBA-NEXT: .LBB60_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB59_3: +; RV32ZBA-NEXT: .LBB60_3: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 -; RV32ZBA-NEXT: beqz a0, .LBB59_2 -; RV32ZBA-NEXT: .LBB59_4: # %overflow +; RV32ZBA-NEXT: beqz a0, .LBB60_2 +; RV32ZBA-NEXT: .LBB60_4: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a1, a0, a1 -; RV64ZBA-NEXT: bgeu a0, a1, .LBB59_2 +; RV64ZBA-NEXT: bgeu a0, a1, .LBB60_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB59_2: # %continue +; RV64ZBA-NEXT: .LBB60_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3462,11 +3510,11 @@ ; RV32-NEXT: mulh a2, a0, a1 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: srai a0, a0, 31 -; RV32-NEXT: beq a2, a0, .LBB60_2 +; RV32-NEXT: beq a2, a0, .LBB61_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB60_2: # %continue +; RV32-NEXT: 
.LBB61_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3476,11 +3524,11 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: mul a2, a0, a1 ; RV64-NEXT: mulw a0, a0, a1 -; RV64-NEXT: beq a0, a2, .LBB60_2 +; RV64-NEXT: beq a0, a2, .LBB61_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB60_2: # %continue +; RV64-NEXT: .LBB61_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3489,11 +3537,11 @@ ; RV32ZBA-NEXT: mulh a2, a0, a1 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: srai a0, a0, 31 -; RV32ZBA-NEXT: beq a2, a0, .LBB60_2 +; RV32ZBA-NEXT: beq a2, a0, .LBB61_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB60_2: # %continue +; RV32ZBA-NEXT: .LBB61_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3503,11 +3551,11 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: mul a2, a0, a1 ; RV64ZBA-NEXT: mulw a0, a0, a1 -; RV64ZBA-NEXT: beq a0, a2, .LBB60_2 +; RV64ZBA-NEXT: beq a0, a2, .LBB61_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB60_2: # %continue +; RV64ZBA-NEXT: .LBB61_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3573,13 +3621,13 @@ ; RV32-NEXT: xor a0, a0, a1 ; RV32-NEXT: xor a1, t6, a1 ; RV32-NEXT: or a0, a1, a0 -; RV32-NEXT: beqz a0, .LBB61_2 +; RV32-NEXT: beqz a0, .LBB62_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 -; RV32-NEXT: j .LBB61_3 -; RV32-NEXT: .LBB61_2: # %continue +; RV32-NEXT: j .LBB62_3 +; RV32-NEXT: .LBB62_2: # %continue ; RV32-NEXT: li a0, 1 -; RV32-NEXT: .LBB61_3: # %overflow +; RV32-NEXT: .LBB62_3: # %overflow ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -3589,11 +3637,11 @@ ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 -; RV64-NEXT: beq a2, a0, .LBB61_2 +; RV64-NEXT: beq a2, a0, .LBB62_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB61_2: # %continue +; RV64-NEXT: .LBB62_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3646,13 +3694,13 @@ ; RV32ZBA-NEXT: xor a0, a0, a1 ; RV32ZBA-NEXT: xor a1, t6, a1 ; RV32ZBA-NEXT: or a0, a1, a0 -; RV32ZBA-NEXT: beqz a0, .LBB61_2 +; RV32ZBA-NEXT: beqz a0, .LBB62_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 -; RV32ZBA-NEXT: j .LBB61_3 -; RV32ZBA-NEXT: .LBB61_2: # %continue +; RV32ZBA-NEXT: j .LBB62_3 +; RV32ZBA-NEXT: .LBB62_2: # %continue ; RV32ZBA-NEXT: li a0, 1 -; RV32ZBA-NEXT: .LBB61_3: # %overflow +; RV32ZBA-NEXT: .LBB62_3: # %overflow ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret @@ -3662,11 +3710,11 @@ ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 -; RV64ZBA-NEXT: beq a2, a0, .LBB61_2 +; RV64ZBA-NEXT: beq a2, a0, .LBB62_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB61_2: # %continue +; RV64ZBA-NEXT: .LBB62_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3723,11 +3771,11 @@ ; RV32-NEXT: xor a0, a0, a1 ; RV32-NEXT: xor a1, t4, a1 ; RV32-NEXT: or a0, a1, a0 -; RV32-NEXT: beqz a0, .LBB62_2 +; RV32-NEXT: beqz a0, .LBB63_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB62_2: # %continue +; RV32-NEXT: .LBB63_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3737,11 +3785,11 @@ ; RV64-NEXT: mulh a2, a0, a1 ; 
RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 -; RV64-NEXT: beq a2, a0, .LBB62_2 +; RV64-NEXT: beq a2, a0, .LBB63_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB62_2: # %continue +; RV64-NEXT: .LBB63_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3785,11 +3833,11 @@ ; RV32ZBA-NEXT: xor a0, a0, a1 ; RV32ZBA-NEXT: xor a1, t4, a1 ; RV32ZBA-NEXT: or a0, a1, a0 -; RV32ZBA-NEXT: beqz a0, .LBB62_2 +; RV32ZBA-NEXT: beqz a0, .LBB63_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB62_2: # %continue +; RV32ZBA-NEXT: .LBB63_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3799,11 +3847,11 @@ ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 -; RV64ZBA-NEXT: beq a2, a0, .LBB62_2 +; RV64ZBA-NEXT: beq a2, a0, .LBB63_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB62_2: # %continue +; RV64ZBA-NEXT: .LBB63_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3823,11 +3871,11 @@ ; RV32-LABEL: umulo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a0, a0, a1 -; RV32-NEXT: beqz a0, .LBB63_2 +; RV32-NEXT: beqz a0, .LBB64_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB63_2: # %continue +; RV32-NEXT: .LBB64_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3837,22 +3885,22 @@ ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: beqz a0, .LBB63_2 +; RV64-NEXT: beqz a0, .LBB64_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB63_2: # %continue +; RV64-NEXT: .LBB64_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a0, a0, a1 -; RV32ZBA-NEXT: beqz a0, .LBB63_2 +; RV32ZBA-NEXT: beqz a0, .LBB64_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB63_2: # %continue +; RV32ZBA-NEXT: .LBB64_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3862,11 +3910,11 @@ ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srli a0, a0, 32 -; RV64ZBA-NEXT: beqz a0, .LBB63_2 +; RV64ZBA-NEXT: beqz a0, .LBB64_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB63_2: # %continue +; RV64ZBA-NEXT: .LBB64_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3901,22 +3949,22 @@ ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: or a0, a0, a4 -; RV32-NEXT: beqz a0, .LBB64_2 +; RV32-NEXT: beqz a0, .LBB65_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB64_2: # %continue +; RV32-NEXT: .LBB65_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a0, a0, a1 -; RV64-NEXT: beqz a0, .LBB64_2 +; RV64-NEXT: beqz a0, .LBB65_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB64_2: # %continue +; RV64-NEXT: .LBB65_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3938,22 +3986,22 @@ ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: or a0, a0, a4 -; RV32ZBA-NEXT: beqz a0, .LBB64_2 +; RV32ZBA-NEXT: beqz a0, .LBB65_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; 
RV32ZBA-NEXT: .LBB64_2: # %continue +; RV32ZBA-NEXT: .LBB65_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a0, a0, a1 -; RV64ZBA-NEXT: beqz a0, .LBB64_2 +; RV64ZBA-NEXT: beqz a0, .LBB65_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB64_2: # %continue +; RV64ZBA-NEXT: .LBB65_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3976,26 +4024,26 @@ ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a1, a1 ; RV32-NEXT: add a2, a2, a0 -; RV32-NEXT: beq a2, a1, .LBB65_2 +; RV32-NEXT: beq a2, a1, .LBB66_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 -; RV32-NEXT: .LBB65_2: # %entry -; RV32-NEXT: beqz a0, .LBB65_4 +; RV32-NEXT: .LBB66_2: # %entry +; RV32-NEXT: beqz a0, .LBB66_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB65_4: # %continue +; RV32-NEXT: .LBB66_4: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a0 -; RV64-NEXT: bgeu a1, a0, .LBB65_2 +; RV64-NEXT: bgeu a1, a0, .LBB66_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB65_2: # %continue +; RV64-NEXT: .LBB66_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -4005,26 +4053,26 @@ ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a1, a1 ; RV32ZBA-NEXT: add a2, a2, a0 -; RV32ZBA-NEXT: beq a2, a1, .LBB65_2 +; RV32ZBA-NEXT: beq a2, a1, .LBB66_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 -; RV32ZBA-NEXT: .LBB65_2: # %entry -; RV32ZBA-NEXT: beqz a0, .LBB65_4 +; RV32ZBA-NEXT: .LBB66_2: # %entry +; RV32ZBA-NEXT: beqz a0, .LBB66_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB65_4: # %continue +; RV32ZBA-NEXT: .LBB66_4: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a0 -; RV64ZBA-NEXT: bgeu a1, a0, .LBB65_2 +; RV64ZBA-NEXT: bgeu a1, a0, .LBB66_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB65_2: # %continue +; RV64ZBA-NEXT: .LBB66_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry:
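Reviewer note, not part of the patch: the rewrite relies on the identity that, for unsigned X and a constant C, the truncated sum X + C wraps exactly when it ends up less than C; the old special case C == 1 ("overflow iff the result is 0") is just the instance "result < 1". A minimal standalone C++ sketch of that identity, with a hypothetical helper name used purely for illustration:

#include <cassert>
#include <cstdint>
#include <limits>

// Illustration only: detect unsigned add overflow with the generalized
// "(X + C) < C" check that the patch now emits (sltiu/sltu on RISC-V).
static bool uaddOverflows(uint32_t X, uint32_t C, uint32_t &Result) {
  Result = X + C;    // wraps modulo 2^32 on overflow
  return Result < C; // the sum wrapped iff it is smaller than the addend
}

int main() {
  uint32_t R;
  // C == 1: the old special case; "result < 1" is the same as "result == 0".
  assert(uaddOverflows(std::numeric_limits<uint32_t>::max(), 1, R) && R == 0);
  assert(!uaddOverflows(41, 1, R) && R == 42);
  // C == 2: mirrors the new uaddo.i64.constant test, which checks the sum
  // with "sltiu a0, a2, 2".
  assert(uaddOverflows(0xFFFFFFFFu, 2, R) && R == 1);
  assert(!uaddOverflows(7, 2, R) && R == 9);
  return 0;
}

The same identity with C == -2 (0xFFFFFFFE as unsigned) explains the uaddo.i32.constant diff: comparing the sum against the constant instead of against X lets the RV64 code drop the extra sext.w copy of X, which is the live-range benefit the comment mentions.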