diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8820,13 +8820,10 @@
   EVT SetCCType = getSetCCResultType(
       DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
   SDValue SetCC;
-  if (IsAdd && isOneConstant(RHS)) {
-    // Special case: uaddo X, 1 overflowed if X+1 is 0. This potential reduces
-    // the live range of X. We assume comparing with 0 is cheap.
-    // TODO: This generalizes to (X + C) < C.
-    SetCC =
-        DAG.getSetCC(dl, SetCCType, Result,
-                     DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ);
+  if (IsAdd && isa<ConstantSDNode>(RHS)) {
+    // Special case: uaddo X, C overflowed if X+C < C. This potentially reduces
+    // the live range of X.
+    SetCC = DAG.getSetCC(dl, SetCCType, Result, RHS, ISD::SETULT);
   } else {
     ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
     SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6969,11 +6969,10 @@
                             DAG.getValueType(MVT::i32));
     SDValue Overflow;
-    if (IsAdd && isOneConstant(RHS)) {
-      // Special case uaddo X, 1 overflowed if the addition result is 0.
-      // FIXME: We can do this for any constant RHS by using (X + C) < C.
-      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
-                              DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
+    if (IsAdd && isa<ConstantSDNode>(RHS)) {
+      // Special case uaddo X, C overflowed if the addition result is less than
+      // C.
+      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, RHS, ISD::SETULT);
     } else {
       // Sign extend the LHS and perform an unsigned compare with the ADDW
       // result.
Since the inputs are sign extended from i32, this is equivalent diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -390,31 +390,29 @@ ; RV32-LABEL: uaddo.i32.constant: ; RV32: # %bb.0: # %entry ; RV32-NEXT: addi a2, a0, -2 -; RV32-NEXT: sltu a0, a2, a0 +; RV32-NEXT: sltiu a0, a2, -2 ; RV32-NEXT: sw a2, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.i32.constant: ; RV64: # %bb.0: # %entry -; RV64-NEXT: sext.w a2, a0 -; RV64-NEXT: addiw a3, a0, -2 -; RV64-NEXT: sltu a0, a3, a2 -; RV64-NEXT: sw a3, 0(a1) +; RV64-NEXT: addiw a2, a0, -2 +; RV64-NEXT: sltiu a0, a2, -2 +; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32.constant: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: addi a2, a0, -2 -; RV32ZBA-NEXT: sltu a0, a2, a0 +; RV32ZBA-NEXT: sltiu a0, a2, -2 ; RV32ZBA-NEXT: sw a2, 0(a1) ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.i32.constant: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: sext.w a2, a0 -; RV64ZBA-NEXT: addiw a3, a0, -2 -; RV64ZBA-NEXT: sltu a0, a3, a2 -; RV64ZBA-NEXT: sw a3, 0(a1) +; RV64ZBA-NEXT: addiw a2, a0, -2 +; RV64ZBA-NEXT: sltiu a0, a2, -2 +; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2) @@ -510,6 +508,56 @@ ret i1 %obit } +define zeroext i1 @uaddo.i64.constant(i64 %v1, i64* %res) { +; RV32-LABEL: uaddo.i64.constant: +; RV32: # %bb.0: # %entry +; RV32-NEXT: mv a3, a0 +; RV32-NEXT: addi a4, a0, 2 +; RV32-NEXT: sltu a0, a4, a0 +; RV32-NEXT: add a5, a1, a0 +; RV32-NEXT: bgeu a4, a3, .LBB11_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: sltu a0, a5, a1 +; RV32-NEXT: .LBB11_2: # %entry +; RV32-NEXT: sw a4, 0(a2) +; RV32-NEXT: sw a5, 4(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: uaddo.i64.constant: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a2, a0, 2 +; RV64-NEXT: sltiu a0, a2, 2 +; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: ret +; +; RV32ZBA-LABEL: uaddo.i64.constant: +; RV32ZBA: # %bb.0: # %entry +; RV32ZBA-NEXT: mv a3, a0 +; RV32ZBA-NEXT: addi a4, a0, 2 +; RV32ZBA-NEXT: sltu a0, a4, a0 +; RV32ZBA-NEXT: add a5, a1, a0 +; RV32ZBA-NEXT: bgeu a4, a3, .LBB11_2 +; RV32ZBA-NEXT: # %bb.1: # %entry +; RV32ZBA-NEXT: sltu a0, a5, a1 +; RV32ZBA-NEXT: .LBB11_2: # %entry +; RV32ZBA-NEXT: sw a4, 0(a2) +; RV32ZBA-NEXT: sw a5, 4(a2) +; RV32ZBA-NEXT: ret +; +; RV64ZBA-LABEL: uaddo.i64.constant: +; RV64ZBA: # %bb.0: # %entry +; RV64ZBA-NEXT: addi a2, a0, 2 +; RV64ZBA-NEXT: sltiu a0, a2, 2 +; RV64ZBA-NEXT: sd a2, 0(a1) +; RV64ZBA-NEXT: ret +entry: + %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2) + %val = extractvalue {i64, i1} %t, 0 + %obit = extractvalue {i64, i1} %t, 1 + store i64 %val, i64* %res + ret i1 %obit +} + define zeroext i1 @uaddo.i64.constant_one(i64 %v1, i64* %res) { ; RV32-LABEL: uaddo.i64.constant_one: ; RV32: # %bb.0: # %entry @@ -517,10 +565,10 @@ ; RV32-NEXT: addi a4, a0, 1 ; RV32-NEXT: sltu a0, a4, a0 ; RV32-NEXT: add a5, a1, a0 -; RV32-NEXT: bgeu a4, a3, .LBB11_2 +; RV32-NEXT: bgeu a4, a3, .LBB12_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a5, a1 -; RV32-NEXT: .LBB11_2: # %entry +; RV32-NEXT: .LBB12_2: # %entry ; RV32-NEXT: sw a4, 0(a2) ; RV32-NEXT: sw a5, 4(a2) ; RV32-NEXT: ret @@ -538,10 +586,10 @@ ; RV32ZBA-NEXT: addi a4, a0, 1 ; RV32ZBA-NEXT: sltu a0, a4, a0 ; RV32ZBA-NEXT: add a5, a1, a0 -; RV32ZBA-NEXT: bgeu a4, a3, .LBB11_2 +; RV32ZBA-NEXT: bgeu a4, a3, .LBB12_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a5, a1 -; RV32ZBA-NEXT: 
.LBB11_2: # %entry +; RV32ZBA-NEXT: .LBB12_2: # %entry ; RV32ZBA-NEXT: sw a4, 0(a2) ; RV32ZBA-NEXT: sw a5, 4(a2) ; RV32ZBA-NEXT: ret @@ -560,6 +608,189 @@ ret i1 %obit } +define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, i64* %res) { +; RV32-LABEL: uaddo.i64.constant_2048: +; RV32: # %bb.0: # %entry +; RV32-NEXT: mv a3, a0 +; RV32-NEXT: addi a0, a0, 1024 +; RV32-NEXT: addi a4, a0, 1024 +; RV32-NEXT: sltu a0, a4, a3 +; RV32-NEXT: add a5, a1, a0 +; RV32-NEXT: bgeu a4, a3, .LBB13_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: sltu a0, a5, a1 +; RV32-NEXT: .LBB13_2: # %entry +; RV32-NEXT: sw a4, 0(a2) +; RV32-NEXT: sw a5, 4(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: uaddo.i64.constant_2048: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, 1024 +; RV64-NEXT: addi a2, a0, 1024 +; RV64-NEXT: srli a0, a2, 11 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: sd a2, 0(a1) +; RV64-NEXT: ret +; +; RV32ZBA-LABEL: uaddo.i64.constant_2048: +; RV32ZBA: # %bb.0: # %entry +; RV32ZBA-NEXT: mv a3, a0 +; RV32ZBA-NEXT: addi a0, a0, 1024 +; RV32ZBA-NEXT: addi a4, a0, 1024 +; RV32ZBA-NEXT: sltu a0, a4, a3 +; RV32ZBA-NEXT: add a5, a1, a0 +; RV32ZBA-NEXT: bgeu a4, a3, .LBB13_2 +; RV32ZBA-NEXT: # %bb.1: # %entry +; RV32ZBA-NEXT: sltu a0, a5, a1 +; RV32ZBA-NEXT: .LBB13_2: # %entry +; RV32ZBA-NEXT: sw a4, 0(a2) +; RV32ZBA-NEXT: sw a5, 4(a2) +; RV32ZBA-NEXT: ret +; +; RV64ZBA-LABEL: uaddo.i64.constant_2048: +; RV64ZBA: # %bb.0: # %entry +; RV64ZBA-NEXT: addi a0, a0, 1024 +; RV64ZBA-NEXT: addi a2, a0, 1024 +; RV64ZBA-NEXT: srli a0, a2, 11 +; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: sd a2, 0(a1) +; RV64ZBA-NEXT: ret +entry: + %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2048) + %val = extractvalue {i64, i1} %t, 0 + %obit = extractvalue {i64, i1} %t, 1 + store i64 %val, i64* %res + ret i1 %obit +} + +define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, i64* %res) { +; RV32-LABEL: uaddo.i64.constant_2049: +; RV32: # %bb.0: # %entry +; RV32-NEXT: mv a3, a0 +; RV32-NEXT: addi a0, a0, 1025 +; RV32-NEXT: addi a4, a0, 1024 +; RV32-NEXT: sltu a0, a4, a3 +; RV32-NEXT: add a5, a1, a0 +; RV32-NEXT: bgeu a4, a3, .LBB14_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: sltu a0, a5, a1 +; RV32-NEXT: .LBB14_2: # %entry +; RV32-NEXT: sw a4, 0(a2) +; RV32-NEXT: sw a5, 4(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: uaddo.i64.constant_2049: +; RV64: # %bb.0: # %entry +; RV64-NEXT: lui a2, 1 +; RV64-NEXT: addiw a2, a2, -2047 +; RV64-NEXT: add a3, a0, a2 +; RV64-NEXT: sltu a0, a3, a2 +; RV64-NEXT: sd a3, 0(a1) +; RV64-NEXT: ret +; +; RV32ZBA-LABEL: uaddo.i64.constant_2049: +; RV32ZBA: # %bb.0: # %entry +; RV32ZBA-NEXT: mv a3, a0 +; RV32ZBA-NEXT: addi a0, a0, 1025 +; RV32ZBA-NEXT: addi a4, a0, 1024 +; RV32ZBA-NEXT: sltu a0, a4, a3 +; RV32ZBA-NEXT: add a5, a1, a0 +; RV32ZBA-NEXT: bgeu a4, a3, .LBB14_2 +; RV32ZBA-NEXT: # %bb.1: # %entry +; RV32ZBA-NEXT: sltu a0, a5, a1 +; RV32ZBA-NEXT: .LBB14_2: # %entry +; RV32ZBA-NEXT: sw a4, 0(a2) +; RV32ZBA-NEXT: sw a5, 4(a2) +; RV32ZBA-NEXT: ret +; +; RV64ZBA-LABEL: uaddo.i64.constant_2049: +; RV64ZBA: # %bb.0: # %entry +; RV64ZBA-NEXT: lui a2, 1 +; RV64ZBA-NEXT: addiw a2, a2, -2047 +; RV64ZBA-NEXT: add a3, a0, a2 +; RV64ZBA-NEXT: sltu a0, a3, a2 +; RV64ZBA-NEXT: sd a3, 0(a1) +; RV64ZBA-NEXT: ret +entry: + %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2049) + %val = extractvalue {i64, i1} %t, 0 + %obit = extractvalue {i64, i1} %t, 1 + store i64 %val, i64* %res + ret i1 %obit +} + +define i64 @uaddo.i64.constant_setcc_on_overflow_flag(i64 %v1) { +; RV32-LABEL: 
uaddo.i64.constant_setcc_on_overflow_flag: +; RV32: # %bb.0: # %entry +; RV32-NEXT: mv a2, a1 +; RV32-NEXT: mv a3, a0 +; RV32-NEXT: addi a0, a0, 2 +; RV32-NEXT: sltu a4, a0, a3 +; RV32-NEXT: add a1, a1, a4 +; RV32-NEXT: bltu a0, a3, .LBB15_3 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: bnez a4, .LBB15_4 +; RV32-NEXT: .LBB15_2: # %IfNoOverflow +; RV32-NEXT: ret +; RV32-NEXT: .LBB15_3: # %entry +; RV32-NEXT: sltu a4, a1, a2 +; RV32-NEXT: beqz a4, .LBB15_2 +; RV32-NEXT: .LBB15_4: # %IfOverflow +; RV32-NEXT: li a0, 0 +; RV32-NEXT: li a1, 0 +; RV32-NEXT: ret +; +; RV64-LABEL: uaddo.i64.constant_setcc_on_overflow_flag: +; RV64: # %bb.0: # %entry +; RV64-NEXT: addi a0, a0, 2 +; RV64-NEXT: li a1, 2 +; RV64-NEXT: bgeu a0, a1, .LBB15_2 +; RV64-NEXT: # %bb.1: # %IfOverflow +; RV64-NEXT: li a0, 0 +; RV64-NEXT: .LBB15_2: # %IfNoOverflow +; RV64-NEXT: ret +; +; RV32ZBA-LABEL: uaddo.i64.constant_setcc_on_overflow_flag: +; RV32ZBA: # %bb.0: # %entry +; RV32ZBA-NEXT: mv a2, a1 +; RV32ZBA-NEXT: mv a3, a0 +; RV32ZBA-NEXT: addi a0, a0, 2 +; RV32ZBA-NEXT: sltu a4, a0, a3 +; RV32ZBA-NEXT: add a1, a1, a4 +; RV32ZBA-NEXT: bltu a0, a3, .LBB15_3 +; RV32ZBA-NEXT: # %bb.1: # %entry +; RV32ZBA-NEXT: bnez a4, .LBB15_4 +; RV32ZBA-NEXT: .LBB15_2: # %IfNoOverflow +; RV32ZBA-NEXT: ret +; RV32ZBA-NEXT: .LBB15_3: # %entry +; RV32ZBA-NEXT: sltu a4, a1, a2 +; RV32ZBA-NEXT: beqz a4, .LBB15_2 +; RV32ZBA-NEXT: .LBB15_4: # %IfOverflow +; RV32ZBA-NEXT: li a0, 0 +; RV32ZBA-NEXT: li a1, 0 +; RV32ZBA-NEXT: ret +; +; RV64ZBA-LABEL: uaddo.i64.constant_setcc_on_overflow_flag: +; RV64ZBA: # %bb.0: # %entry +; RV64ZBA-NEXT: addi a0, a0, 2 +; RV64ZBA-NEXT: li a1, 2 +; RV64ZBA-NEXT: bgeu a0, a1, .LBB15_2 +; RV64ZBA-NEXT: # %bb.1: # %IfOverflow +; RV64ZBA-NEXT: li a0, 0 +; RV64ZBA-NEXT: .LBB15_2: # %IfNoOverflow +; RV64ZBA-NEXT: ret +entry: + %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2) + %val = extractvalue {i64, i1} %t, 0 + %obit = extractvalue {i64, i1} %t, 1 + br i1 %obit, label %IfOverflow, label %IfNoOverflow +IfOverflow: + ret i64 0 +IfNoOverflow: + ret i64 %val +} + define zeroext i1 @ssubo1.i32(i32 %v1, i32 %v2, i32* %res) { ; RV32-LABEL: ssubo1.i32: ; RV32: # %bb.0: # %entry @@ -833,13 +1064,13 @@ ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a5 ; RV32-NEXT: sub a2, a0, a2 -; RV32-NEXT: beq a3, a1, .LBB18_2 +; RV32-NEXT: beq a3, a1, .LBB22_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 -; RV32-NEXT: j .LBB18_3 -; RV32-NEXT: .LBB18_2: +; RV32-NEXT: j .LBB22_3 +; RV32-NEXT: .LBB22_2: ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: .LBB18_3: # %entry +; RV32-NEXT: .LBB22_3: # %entry ; RV32-NEXT: sw a2, 0(a4) ; RV32-NEXT: sw a3, 4(a4) ; RV32-NEXT: ret @@ -857,13 +1088,13 @@ ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a5 ; RV32ZBA-NEXT: sub a2, a0, a2 -; RV32ZBA-NEXT: beq a3, a1, .LBB18_2 +; RV32ZBA-NEXT: beq a3, a1, .LBB22_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 -; RV32ZBA-NEXT: j .LBB18_3 -; RV32ZBA-NEXT: .LBB18_2: +; RV32ZBA-NEXT: j .LBB22_3 +; RV32ZBA-NEXT: .LBB22_2: ; RV32ZBA-NEXT: sltu a0, a0, a2 -; RV32ZBA-NEXT: .LBB18_3: # %entry +; RV32ZBA-NEXT: .LBB22_3: # %entry ; RV32ZBA-NEXT: sw a2, 0(a4) ; RV32ZBA-NEXT: sw a3, 4(a4) ; RV32ZBA-NEXT: ret @@ -1504,10 +1735,10 @@ ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: slt a2, a2, a0 ; RV32-NEXT: slti a3, a1, 0 -; RV32-NEXT: bne a3, a2, .LBB28_2 +; RV32-NEXT: bne a3, a2, .LBB32_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB28_2: # %entry +; RV32-NEXT: .LBB32_2: # %entry ; RV32-NEXT: ret 
; ; RV64-LABEL: saddo.select.i32: @@ -1516,10 +1747,10 @@ ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: add a4, a3, a2 ; RV64-NEXT: addw a2, a3, a2 -; RV64-NEXT: bne a2, a4, .LBB28_2 +; RV64-NEXT: bne a2, a4, .LBB32_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB28_2: # %entry +; RV64-NEXT: .LBB32_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i32: @@ -1527,10 +1758,10 @@ ; RV32ZBA-NEXT: add a2, a0, a1 ; RV32ZBA-NEXT: slt a2, a2, a0 ; RV32ZBA-NEXT: slti a3, a1, 0 -; RV32ZBA-NEXT: bne a3, a2, .LBB28_2 +; RV32ZBA-NEXT: bne a3, a2, .LBB32_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB28_2: # %entry +; RV32ZBA-NEXT: .LBB32_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i32: @@ -1539,10 +1770,10 @@ ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: add a4, a3, a2 ; RV64ZBA-NEXT: addw a2, a3, a2 -; RV64ZBA-NEXT: bne a2, a4, .LBB28_2 +; RV64ZBA-NEXT: bne a2, a4, .LBB32_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB28_2: # %entry +; RV64ZBA-NEXT: .LBB32_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1607,11 +1838,11 @@ ; RV32-NEXT: xor a5, a1, a3 ; RV32-NEXT: not a5, a5 ; RV32-NEXT: and a4, a5, a4 -; RV32-NEXT: bltz a4, .LBB30_2 +; RV32-NEXT: bltz a4, .LBB34_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB30_2: # %entry +; RV32-NEXT: .LBB34_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: saddo.select.i64: @@ -1619,10 +1850,10 @@ ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: slt a2, a2, a0 ; RV64-NEXT: slti a3, a1, 0 -; RV64-NEXT: bne a3, a2, .LBB30_2 +; RV64-NEXT: bne a3, a2, .LBB34_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB30_2: # %entry +; RV64-NEXT: .LBB34_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: saddo.select.i64: @@ -1635,11 +1866,11 @@ ; RV32ZBA-NEXT: xor a5, a1, a3 ; RV32ZBA-NEXT: not a5, a5 ; RV32ZBA-NEXT: and a4, a5, a4 -; RV32ZBA-NEXT: bltz a4, .LBB30_2 +; RV32ZBA-NEXT: bltz a4, .LBB34_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB30_2: # %entry +; RV32ZBA-NEXT: .LBB34_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: saddo.select.i64: @@ -1647,10 +1878,10 @@ ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: slt a2, a2, a0 ; RV64ZBA-NEXT: slti a3, a1, 0 -; RV64ZBA-NEXT: bne a3, a2, .LBB30_2 +; RV64ZBA-NEXT: bne a3, a2, .LBB34_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB30_2: # %entry +; RV64ZBA-NEXT: .LBB34_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1716,39 +1947,39 @@ ; RV32-LABEL: uaddo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a2, a0, a1 -; RV32-NEXT: bltu a2, a0, .LBB32_2 +; RV32-NEXT: bltu a2, a0, .LBB36_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB32_2: # %entry +; RV32-NEXT: .LBB36_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a2, a0, a1 ; RV64-NEXT: sext.w a3, a0 -; RV64-NEXT: bltu a2, a3, .LBB32_2 +; RV64-NEXT: bltu a2, a3, .LBB36_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB32_2: # %entry +; RV64-NEXT: .LBB36_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a2, a0, a1 -; RV32ZBA-NEXT: bltu a2, a0, .LBB32_2 +; RV32ZBA-NEXT: bltu a2, a0, .LBB36_2 ; RV32ZBA-NEXT: # 
%bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB32_2: # %entry +; RV32ZBA-NEXT: .LBB36_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a2, a0, a1 ; RV64ZBA-NEXT: sext.w a3, a0 -; RV64ZBA-NEXT: bltu a2, a3, .LBB32_2 +; RV64ZBA-NEXT: bltu a2, a3, .LBB36_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB32_2: # %entry +; RV64ZBA-NEXT: .LBB36_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -1801,15 +2032,15 @@ ; RV32-NEXT: add a4, a0, a2 ; RV32-NEXT: sltu a4, a4, a0 ; RV32-NEXT: add a5, a5, a4 -; RV32-NEXT: bne a5, a1, .LBB34_3 +; RV32-NEXT: bne a5, a1, .LBB38_3 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: beqz a4, .LBB34_4 -; RV32-NEXT: .LBB34_2: # %entry +; RV32-NEXT: beqz a4, .LBB38_4 +; RV32-NEXT: .LBB38_2: # %entry ; RV32-NEXT: ret -; RV32-NEXT: .LBB34_3: # %entry +; RV32-NEXT: .LBB38_3: # %entry ; RV32-NEXT: sltu a4, a5, a1 -; RV32-NEXT: bnez a4, .LBB34_2 -; RV32-NEXT: .LBB34_4: # %entry +; RV32-NEXT: bnez a4, .LBB38_2 +; RV32-NEXT: .LBB38_4: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: ret @@ -1817,10 +2048,10 @@ ; RV64-LABEL: uaddo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a2, a0, a1 -; RV64-NEXT: bltu a2, a0, .LBB34_2 +; RV64-NEXT: bltu a2, a0, .LBB38_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB34_2: # %entry +; RV64-NEXT: .LBB38_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.select.i64: @@ -1829,15 +2060,15 @@ ; RV32ZBA-NEXT: add a4, a0, a2 ; RV32ZBA-NEXT: sltu a4, a4, a0 ; RV32ZBA-NEXT: add a5, a5, a4 -; RV32ZBA-NEXT: bne a5, a1, .LBB34_3 +; RV32ZBA-NEXT: bne a5, a1, .LBB38_3 ; RV32ZBA-NEXT: # %bb.1: # %entry -; RV32ZBA-NEXT: beqz a4, .LBB34_4 -; RV32ZBA-NEXT: .LBB34_2: # %entry +; RV32ZBA-NEXT: beqz a4, .LBB38_4 +; RV32ZBA-NEXT: .LBB38_2: # %entry ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB34_3: # %entry +; RV32ZBA-NEXT: .LBB38_3: # %entry ; RV32ZBA-NEXT: sltu a4, a5, a1 -; RV32ZBA-NEXT: bnez a4, .LBB34_2 -; RV32ZBA-NEXT: .LBB34_4: # %entry +; RV32ZBA-NEXT: bnez a4, .LBB38_2 +; RV32ZBA-NEXT: .LBB38_4: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 ; RV32ZBA-NEXT: ret @@ -1845,10 +2076,10 @@ ; RV64ZBA-LABEL: uaddo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a2, a0, a1 -; RV64ZBA-NEXT: bltu a2, a0, .LBB34_2 +; RV64ZBA-NEXT: bltu a2, a0, .LBB38_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB34_2: # %entry +; RV64ZBA-NEXT: .LBB38_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2) @@ -1864,10 +2095,10 @@ ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a3, a0 -; RV32-NEXT: beq a2, a1, .LBB35_2 +; RV32-NEXT: beq a2, a1, .LBB39_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 -; RV32-NEXT: .LBB35_2: # %entry +; RV32-NEXT: .LBB39_2: # %entry ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; @@ -1884,10 +2115,10 @@ ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a3, a0 -; RV32ZBA-NEXT: beq a2, a1, .LBB35_2 +; RV32ZBA-NEXT: beq a2, a1, .LBB39_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 -; RV32ZBA-NEXT: .LBB35_2: # %entry +; RV32ZBA-NEXT: .LBB39_2: # %entry ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; @@ -1910,10 +2141,10 @@ ; RV32-NEXT: sgtz a2, a1 ; RV32-NEXT: sub a3, a0, a1 ; RV32-NEXT: slt a3, a3, a0 -; RV32-NEXT: 
bne a2, a3, .LBB36_2 +; RV32-NEXT: bne a2, a3, .LBB40_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB36_2: # %entry +; RV32-NEXT: .LBB40_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i32: @@ -1922,10 +2153,10 @@ ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: sub a4, a3, a2 ; RV64-NEXT: subw a2, a3, a2 -; RV64-NEXT: bne a2, a4, .LBB36_2 +; RV64-NEXT: bne a2, a4, .LBB40_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB36_2: # %entry +; RV64-NEXT: .LBB40_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i32: @@ -1933,10 +2164,10 @@ ; RV32ZBA-NEXT: sgtz a2, a1 ; RV32ZBA-NEXT: sub a3, a0, a1 ; RV32ZBA-NEXT: slt a3, a3, a0 -; RV32ZBA-NEXT: bne a2, a3, .LBB36_2 +; RV32ZBA-NEXT: bne a2, a3, .LBB40_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB36_2: # %entry +; RV32ZBA-NEXT: .LBB40_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i32: @@ -1945,10 +2176,10 @@ ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: sub a4, a3, a2 ; RV64ZBA-NEXT: subw a2, a3, a2 -; RV64ZBA-NEXT: bne a2, a4, .LBB36_2 +; RV64ZBA-NEXT: bne a2, a4, .LBB40_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB36_2: # %entry +; RV64ZBA-NEXT: .LBB40_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2) @@ -2011,11 +2242,11 @@ ; RV32-NEXT: xor a4, a1, a4 ; RV32-NEXT: xor a5, a1, a3 ; RV32-NEXT: and a4, a5, a4 -; RV32-NEXT: bltz a4, .LBB38_2 +; RV32-NEXT: bltz a4, .LBB42_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB38_2: # %entry +; RV32-NEXT: .LBB42_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: ssubo.select.i64: @@ -2023,10 +2254,10 @@ ; RV64-NEXT: sgtz a2, a1 ; RV64-NEXT: sub a3, a0, a1 ; RV64-NEXT: slt a3, a3, a0 -; RV64-NEXT: bne a2, a3, .LBB38_2 +; RV64-NEXT: bne a2, a3, .LBB42_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB38_2: # %entry +; RV64-NEXT: .LBB42_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: ssubo.select.i64: @@ -2037,11 +2268,11 @@ ; RV32ZBA-NEXT: xor a4, a1, a4 ; RV32ZBA-NEXT: xor a5, a1, a3 ; RV32ZBA-NEXT: and a4, a5, a4 -; RV32ZBA-NEXT: bltz a4, .LBB38_2 +; RV32ZBA-NEXT: bltz a4, .LBB42_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB38_2: # %entry +; RV32ZBA-NEXT: .LBB42_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: ssubo.select.i64: @@ -2049,10 +2280,10 @@ ; RV64ZBA-NEXT: sgtz a2, a1 ; RV64ZBA-NEXT: sub a3, a0, a1 ; RV64ZBA-NEXT: slt a3, a3, a0 -; RV64ZBA-NEXT: bne a2, a3, .LBB38_2 +; RV64ZBA-NEXT: bne a2, a3, .LBB42_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB38_2: # %entry +; RV64ZBA-NEXT: .LBB42_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2) @@ -2114,39 +2345,39 @@ ; RV32-LABEL: usubo.select.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a2, a0, a1 -; RV32-NEXT: bltu a0, a2, .LBB40_2 +; RV32-NEXT: bltu a0, a2, .LBB44_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB40_2: # %entry +; RV32-NEXT: .LBB44_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i32: ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a2, a0, a1 ; RV64-NEXT: sext.w a3, a0 -; RV64-NEXT: bltu a3, a2, .LBB40_2 +; RV64-NEXT: bltu a3, a2, .LBB44_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB40_2: # %entry +; RV64-NEXT: .LBB44_2: # %entry ; RV64-NEXT: ret 
; ; RV32ZBA-LABEL: usubo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a2, a0, a1 -; RV32ZBA-NEXT: bltu a0, a2, .LBB40_2 +; RV32ZBA-NEXT: bltu a0, a2, .LBB44_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB40_2: # %entry +; RV32ZBA-NEXT: .LBB44_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i32: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a2, a0, a1 ; RV64ZBA-NEXT: sext.w a3, a0 -; RV64ZBA-NEXT: bltu a3, a2, .LBB40_2 +; RV64ZBA-NEXT: bltu a3, a2, .LBB44_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB40_2: # %entry +; RV64ZBA-NEXT: .LBB44_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) @@ -2198,28 +2429,28 @@ ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a5, a1, a3 ; RV32-NEXT: sub a4, a5, a4 -; RV32-NEXT: beq a4, a1, .LBB42_2 +; RV32-NEXT: beq a4, a1, .LBB46_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a4, a1, a4 -; RV32-NEXT: beqz a4, .LBB42_3 -; RV32-NEXT: j .LBB42_4 -; RV32-NEXT: .LBB42_2: +; RV32-NEXT: beqz a4, .LBB46_3 +; RV32-NEXT: j .LBB46_4 +; RV32-NEXT: .LBB46_2: ; RV32-NEXT: sub a4, a0, a2 ; RV32-NEXT: sltu a4, a0, a4 -; RV32-NEXT: bnez a4, .LBB42_4 -; RV32-NEXT: .LBB42_3: # %entry +; RV32-NEXT: bnez a4, .LBB46_4 +; RV32-NEXT: .LBB46_3: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB42_4: # %entry +; RV32-NEXT: .LBB46_4: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a2, a0, a1 -; RV64-NEXT: bltu a0, a2, .LBB42_2 +; RV64-NEXT: bltu a0, a2, .LBB46_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB42_2: # %entry +; RV64-NEXT: .LBB46_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.select.i64: @@ -2227,28 +2458,28 @@ ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a5, a1, a3 ; RV32ZBA-NEXT: sub a4, a5, a4 -; RV32ZBA-NEXT: beq a4, a1, .LBB42_2 +; RV32ZBA-NEXT: beq a4, a1, .LBB46_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a4, a1, a4 -; RV32ZBA-NEXT: beqz a4, .LBB42_3 -; RV32ZBA-NEXT: j .LBB42_4 -; RV32ZBA-NEXT: .LBB42_2: +; RV32ZBA-NEXT: beqz a4, .LBB46_3 +; RV32ZBA-NEXT: j .LBB46_4 +; RV32ZBA-NEXT: .LBB46_2: ; RV32ZBA-NEXT: sub a4, a0, a2 ; RV32ZBA-NEXT: sltu a4, a0, a4 -; RV32ZBA-NEXT: bnez a4, .LBB42_4 -; RV32ZBA-NEXT: .LBB42_3: # %entry +; RV32ZBA-NEXT: bnez a4, .LBB46_4 +; RV32ZBA-NEXT: .LBB46_3: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB42_4: # %entry +; RV32ZBA-NEXT: .LBB46_4: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a2, a0, a1 -; RV64ZBA-NEXT: bltu a0, a2, .LBB42_2 +; RV64ZBA-NEXT: bltu a0, a2, .LBB46_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB42_2: # %entry +; RV64ZBA-NEXT: .LBB46_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2) @@ -2263,12 +2494,12 @@ ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a4 -; RV32-NEXT: beq a3, a1, .LBB43_2 +; RV32-NEXT: beq a3, a1, .LBB47_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret -; RV32-NEXT: .LBB43_2: +; RV32-NEXT: .LBB47_2: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 ; RV32-NEXT: xori a0, a0, 1 @@ -2286,12 +2517,12 @@ ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a4 -; RV32ZBA-NEXT: 
beq a3, a1, .LBB43_2 +; RV32ZBA-NEXT: beq a3, a1, .LBB47_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB43_2: +; RV32ZBA-NEXT: .LBB47_2: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 ; RV32ZBA-NEXT: xori a0, a0, 1 @@ -2316,10 +2547,10 @@ ; RV32-NEXT: mulh a2, a0, a1 ; RV32-NEXT: mul a3, a0, a1 ; RV32-NEXT: srai a3, a3, 31 -; RV32-NEXT: bne a2, a3, .LBB44_2 +; RV32-NEXT: bne a2, a3, .LBB48_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB44_2: # %entry +; RV32-NEXT: .LBB48_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: smulo.select.i32: @@ -2328,10 +2559,10 @@ ; RV64-NEXT: sext.w a3, a0 ; RV64-NEXT: mul a4, a3, a2 ; RV64-NEXT: mulw a2, a3, a2 -; RV64-NEXT: bne a2, a4, .LBB44_2 +; RV64-NEXT: bne a2, a4, .LBB48_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB44_2: # %entry +; RV64-NEXT: .LBB48_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i32: @@ -2339,10 +2570,10 @@ ; RV32ZBA-NEXT: mulh a2, a0, a1 ; RV32ZBA-NEXT: mul a3, a0, a1 ; RV32ZBA-NEXT: srai a3, a3, 31 -; RV32ZBA-NEXT: bne a2, a3, .LBB44_2 +; RV32ZBA-NEXT: bne a2, a3, .LBB48_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB44_2: # %entry +; RV32ZBA-NEXT: .LBB48_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: smulo.select.i32: @@ -2351,10 +2582,10 @@ ; RV64ZBA-NEXT: sext.w a3, a0 ; RV64ZBA-NEXT: mul a4, a3, a2 ; RV64ZBA-NEXT: mulw a2, a3, a2 -; RV64ZBA-NEXT: bne a2, a4, .LBB44_2 +; RV64ZBA-NEXT: bne a2, a4, .LBB48_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB44_2: # %entry +; RV64ZBA-NEXT: .LBB48_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2458,11 +2689,11 @@ ; RV32-NEXT: xor a5, a5, a4 ; RV32-NEXT: xor a4, t6, a4 ; RV32-NEXT: or a4, a4, a5 -; RV32-NEXT: bnez a4, .LBB46_2 +; RV32-NEXT: bnez a4, .LBB50_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB46_2: # %entry +; RV32-NEXT: .LBB50_2: # %entry ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -2472,10 +2703,10 @@ ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a3, a0, a1 ; RV64-NEXT: srai a3, a3, 63 -; RV64-NEXT: bne a2, a3, .LBB46_2 +; RV64-NEXT: bne a2, a3, .LBB50_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB46_2: # %entry +; RV64-NEXT: .LBB50_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: smulo.select.i64: @@ -2527,11 +2758,11 @@ ; RV32ZBA-NEXT: xor a5, a5, a4 ; RV32ZBA-NEXT: xor a4, t6, a4 ; RV32ZBA-NEXT: or a4, a4, a5 -; RV32ZBA-NEXT: bnez a4, .LBB46_2 +; RV32ZBA-NEXT: bnez a4, .LBB50_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB46_2: # %entry +; RV32ZBA-NEXT: .LBB50_2: # %entry ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret @@ -2541,10 +2772,10 @@ ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a3, a0, a1 ; RV64ZBA-NEXT: srai a3, a3, 63 -; RV64ZBA-NEXT: bne a2, a3, .LBB46_2 +; RV64ZBA-NEXT: bne a2, a3, .LBB50_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB46_2: # %entry +; RV64ZBA-NEXT: .LBB50_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2690,10 +2921,10 @@ ; RV32-LABEL: umulo.select.i32: ; RV32: # %bb.0: # %entry ; 
RV32-NEXT: mulhu a2, a0, a1 -; RV32-NEXT: bnez a2, .LBB48_2 +; RV32-NEXT: bnez a2, .LBB52_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: .LBB48_2: # %entry +; RV32-NEXT: .LBB52_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i32: @@ -2702,19 +2933,19 @@ ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: mulhu a2, a3, a2 ; RV64-NEXT: srli a2, a2, 32 -; RV64-NEXT: bnez a2, .LBB48_2 +; RV64-NEXT: bnez a2, .LBB52_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB48_2: # %entry +; RV64-NEXT: .LBB52_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a2, a0, a1 -; RV32ZBA-NEXT: bnez a2, .LBB48_2 +; RV32ZBA-NEXT: bnez a2, .LBB52_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a1 -; RV32ZBA-NEXT: .LBB48_2: # %entry +; RV32ZBA-NEXT: .LBB52_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i32: @@ -2723,10 +2954,10 @@ ; RV64ZBA-NEXT: zext.w a3, a0 ; RV64ZBA-NEXT: mul a2, a3, a2 ; RV64ZBA-NEXT: srli a2, a2, 32 -; RV64ZBA-NEXT: bnez a2, .LBB48_2 +; RV64ZBA-NEXT: bnez a2, .LBB52_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB48_2: # %entry +; RV64ZBA-NEXT: .LBB52_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2) @@ -2791,20 +3022,20 @@ ; RV32-NEXT: snez a6, a6 ; RV32-NEXT: or a5, a5, a6 ; RV32-NEXT: or a4, a5, a4 -; RV32-NEXT: bnez a4, .LBB50_2 +; RV32-NEXT: bnez a4, .LBB54_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB50_2: # %entry +; RV32-NEXT: .LBB54_2: # %entry ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.select.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a2, a0, a1 -; RV64-NEXT: bnez a2, .LBB50_2 +; RV64-NEXT: bnez a2, .LBB54_2 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB50_2: # %entry +; RV64-NEXT: .LBB54_2: # %entry ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.select.i64: @@ -2825,20 +3056,20 @@ ; RV32ZBA-NEXT: snez a6, a6 ; RV32ZBA-NEXT: or a5, a5, a6 ; RV32ZBA-NEXT: or a4, a5, a4 -; RV32ZBA-NEXT: bnez a4, .LBB50_2 +; RV32ZBA-NEXT: bnez a4, .LBB54_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 ; RV32ZBA-NEXT: mv a1, a3 -; RV32ZBA-NEXT: .LBB50_2: # %entry +; RV32ZBA-NEXT: .LBB54_2: # %entry ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.select.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a2, a0, a1 -; RV64ZBA-NEXT: bnez a2, .LBB50_2 +; RV64ZBA-NEXT: bnez a2, .LBB54_2 ; RV64ZBA-NEXT: # %bb.1: # %entry ; RV64ZBA-NEXT: mv a0, a1 -; RV64ZBA-NEXT: .LBB50_2: # %entry +; RV64ZBA-NEXT: .LBB54_2: # %entry ; RV64ZBA-NEXT: ret entry: %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2) @@ -2918,11 +3149,11 @@ ; RV32-NEXT: add a2, a0, a1 ; RV32-NEXT: slt a0, a2, a0 ; RV32-NEXT: slti a1, a1, 0 -; RV32-NEXT: beq a1, a0, .LBB52_2 +; RV32-NEXT: beq a1, a0, .LBB56_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB52_2: # %continue +; RV32-NEXT: .LBB56_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -2932,11 +3163,11 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: addw a0, a0, a1 -; RV64-NEXT: beq a0, a2, .LBB52_2 +; RV64-NEXT: beq a0, a2, .LBB56_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB52_2: # %continue +; RV64-NEXT: .LBB56_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -2945,11 +3176,11 @@ ; RV32ZBA-NEXT: add a2, a0, a1 ; 
RV32ZBA-NEXT: slt a0, a2, a0 ; RV32ZBA-NEXT: slti a1, a1, 0 -; RV32ZBA-NEXT: beq a1, a0, .LBB52_2 +; RV32ZBA-NEXT: beq a1, a0, .LBB56_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB52_2: # %continue +; RV32ZBA-NEXT: .LBB56_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -2959,11 +3190,11 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: addw a0, a0, a1 -; RV64ZBA-NEXT: beq a0, a2, .LBB52_2 +; RV64ZBA-NEXT: beq a0, a2, .LBB56_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB52_2: # %continue +; RV64ZBA-NEXT: .LBB56_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -2990,11 +3221,11 @@ ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: not a1, a1 ; RV32-NEXT: and a0, a1, a0 -; RV32-NEXT: bgez a0, .LBB53_2 +; RV32-NEXT: bgez a0, .LBB57_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB53_2: # %continue +; RV32-NEXT: .LBB57_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3003,11 +3234,11 @@ ; RV64-NEXT: add a2, a0, a1 ; RV64-NEXT: slt a0, a2, a0 ; RV64-NEXT: slti a1, a1, 0 -; RV64-NEXT: beq a1, a0, .LBB53_2 +; RV64-NEXT: beq a1, a0, .LBB57_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB53_2: # %continue +; RV64-NEXT: .LBB57_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3021,11 +3252,11 @@ ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: not a1, a1 ; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: bgez a0, .LBB53_2 +; RV32ZBA-NEXT: bgez a0, .LBB57_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB53_2: # %continue +; RV32ZBA-NEXT: .LBB57_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3034,11 +3265,11 @@ ; RV64ZBA-NEXT: add a2, a0, a1 ; RV64ZBA-NEXT: slt a0, a2, a0 ; RV64ZBA-NEXT: slti a1, a1, 0 -; RV64ZBA-NEXT: beq a1, a0, .LBB53_2 +; RV64ZBA-NEXT: beq a1, a0, .LBB57_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB53_2: # %continue +; RV64ZBA-NEXT: .LBB57_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3058,11 +3289,11 @@ ; RV32-LABEL: uaddo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: add a1, a0, a1 -; RV32-NEXT: bgeu a1, a0, .LBB54_2 +; RV32-NEXT: bgeu a1, a0, .LBB58_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB54_2: # %continue +; RV32-NEXT: .LBB58_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3070,22 +3301,22 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: addw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 -; RV64-NEXT: bgeu a1, a0, .LBB54_2 +; RV64-NEXT: bgeu a1, a0, .LBB58_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB54_2: # %continue +; RV64-NEXT: .LBB58_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: add a1, a0, a1 -; RV32ZBA-NEXT: bgeu a1, a0, .LBB54_2 +; RV32ZBA-NEXT: bgeu a1, a0, .LBB58_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB54_2: # %continue +; RV32ZBA-NEXT: .LBB58_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3093,11 +3324,11 @@ ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 -; RV64ZBA-NEXT: bgeu a1, a0, .LBB54_2 +; RV64ZBA-NEXT: bgeu a1, a0, 
.LBB58_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB54_2: # %continue +; RV64ZBA-NEXT: .LBB58_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3120,26 +3351,26 @@ ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a3, a0 -; RV32-NEXT: beq a2, a1, .LBB55_2 +; RV32-NEXT: beq a2, a1, .LBB59_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 -; RV32-NEXT: .LBB55_2: # %entry -; RV32-NEXT: beqz a0, .LBB55_4 +; RV32-NEXT: .LBB59_2: # %entry +; RV32-NEXT: beqz a0, .LBB59_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB55_4: # %continue +; RV32-NEXT: .LBB59_4: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: uaddo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a1 -; RV64-NEXT: bgeu a1, a0, .LBB55_2 +; RV64-NEXT: bgeu a1, a0, .LBB59_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB55_2: # %continue +; RV64-NEXT: .LBB59_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3149,26 +3380,26 @@ ; RV32ZBA-NEXT: add a2, a0, a2 ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a3, a0 -; RV32ZBA-NEXT: beq a2, a1, .LBB55_2 +; RV32ZBA-NEXT: beq a2, a1, .LBB59_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 -; RV32ZBA-NEXT: .LBB55_2: # %entry -; RV32ZBA-NEXT: beqz a0, .LBB55_4 +; RV32ZBA-NEXT: .LBB59_2: # %entry +; RV32ZBA-NEXT: beqz a0, .LBB59_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB55_4: # %continue +; RV32ZBA-NEXT: .LBB59_4: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: uaddo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a1 -; RV64ZBA-NEXT: bgeu a1, a0, .LBB55_2 +; RV64ZBA-NEXT: bgeu a1, a0, .LBB59_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB55_2: # %continue +; RV64ZBA-NEXT: .LBB59_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3190,11 +3421,11 @@ ; RV32-NEXT: sgtz a2, a1 ; RV32-NEXT: sub a1, a0, a1 ; RV32-NEXT: slt a0, a1, a0 -; RV32-NEXT: beq a2, a0, .LBB56_2 +; RV32-NEXT: beq a2, a0, .LBB60_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB56_2: # %continue +; RV32-NEXT: .LBB60_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3204,11 +3435,11 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: sub a2, a0, a1 ; RV64-NEXT: subw a0, a0, a1 -; RV64-NEXT: beq a0, a2, .LBB56_2 +; RV64-NEXT: beq a0, a2, .LBB60_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB56_2: # %continue +; RV64-NEXT: .LBB60_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3217,11 +3448,11 @@ ; RV32ZBA-NEXT: sgtz a2, a1 ; RV32ZBA-NEXT: sub a1, a0, a1 ; RV32ZBA-NEXT: slt a0, a1, a0 -; RV32ZBA-NEXT: beq a2, a0, .LBB56_2 +; RV32ZBA-NEXT: beq a2, a0, .LBB60_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB56_2: # %continue +; RV32ZBA-NEXT: .LBB60_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3231,11 +3462,11 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: sub a2, a0, a1 ; RV64ZBA-NEXT: subw a0, a0, a1 -; RV64ZBA-NEXT: beq a0, a2, .LBB56_2 +; RV64ZBA-NEXT: beq a0, a2, .LBB60_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB56_2: # %continue +; 
RV64ZBA-NEXT: .LBB60_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3260,11 +3491,11 @@ ; RV32-NEXT: xor a0, a1, a0 ; RV32-NEXT: xor a1, a1, a3 ; RV32-NEXT: and a0, a1, a0 -; RV32-NEXT: bgez a0, .LBB57_2 +; RV32-NEXT: bgez a0, .LBB61_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB57_2: # %continue +; RV32-NEXT: .LBB61_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3273,11 +3504,11 @@ ; RV64-NEXT: sgtz a2, a1 ; RV64-NEXT: sub a1, a0, a1 ; RV64-NEXT: slt a0, a1, a0 -; RV64-NEXT: beq a2, a0, .LBB57_2 +; RV64-NEXT: beq a2, a0, .LBB61_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB57_2: # %continue +; RV64-NEXT: .LBB61_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3289,11 +3520,11 @@ ; RV32ZBA-NEXT: xor a0, a1, a0 ; RV32ZBA-NEXT: xor a1, a1, a3 ; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: bgez a0, .LBB57_2 +; RV32ZBA-NEXT: bgez a0, .LBB61_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB57_2: # %continue +; RV32ZBA-NEXT: .LBB61_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3302,11 +3533,11 @@ ; RV64ZBA-NEXT: sgtz a2, a1 ; RV64ZBA-NEXT: sub a1, a0, a1 ; RV64ZBA-NEXT: slt a0, a1, a0 -; RV64ZBA-NEXT: beq a2, a0, .LBB57_2 +; RV64ZBA-NEXT: beq a2, a0, .LBB61_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB57_2: # %continue +; RV64ZBA-NEXT: .LBB61_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3326,11 +3557,11 @@ ; RV32-LABEL: usubo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: sub a1, a0, a1 -; RV32-NEXT: bgeu a0, a1, .LBB58_2 +; RV32-NEXT: bgeu a0, a1, .LBB62_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB58_2: # %continue +; RV32-NEXT: .LBB62_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3338,22 +3569,22 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: subw a1, a0, a1 ; RV64-NEXT: sext.w a0, a0 -; RV64-NEXT: bgeu a0, a1, .LBB58_2 +; RV64-NEXT: bgeu a0, a1, .LBB62_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB58_2: # %continue +; RV64-NEXT: .LBB62_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sub a1, a0, a1 -; RV32ZBA-NEXT: bgeu a0, a1, .LBB58_2 +; RV32ZBA-NEXT: bgeu a0, a1, .LBB62_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB58_2: # %continue +; RV32ZBA-NEXT: .LBB62_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3361,11 +3592,11 @@ ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: subw a1, a0, a1 ; RV64ZBA-NEXT: sext.w a0, a0 -; RV64ZBA-NEXT: bgeu a0, a1, .LBB58_2 +; RV64ZBA-NEXT: bgeu a0, a1, .LBB62_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB58_2: # %continue +; RV64ZBA-NEXT: .LBB62_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3387,29 +3618,29 @@ ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a3, a1, a3 ; RV32-NEXT: sub a3, a3, a4 -; RV32-NEXT: beq a3, a1, .LBB59_3 +; RV32-NEXT: beq a3, a1, .LBB63_3 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a1, a3 -; RV32-NEXT: bnez a0, .LBB59_4 -; RV32-NEXT: .LBB59_2: # %continue +; RV32-NEXT: bnez a0, .LBB63_4 +; RV32-NEXT: .LBB63_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret -; RV32-NEXT: .LBB59_3: +; 
RV32-NEXT: .LBB63_3: ; RV32-NEXT: sub a1, a0, a2 ; RV32-NEXT: sltu a0, a0, a1 -; RV32-NEXT: beqz a0, .LBB59_2 -; RV32-NEXT: .LBB59_4: # %overflow +; RV32-NEXT: beqz a0, .LBB63_2 +; RV32-NEXT: .LBB63_4: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: usubo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sub a1, a0, a1 -; RV64-NEXT: bgeu a0, a1, .LBB59_2 +; RV64-NEXT: bgeu a0, a1, .LBB63_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB59_2: # %continue +; RV64-NEXT: .LBB63_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3418,29 +3649,29 @@ ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a3, a1, a3 ; RV32ZBA-NEXT: sub a3, a3, a4 -; RV32ZBA-NEXT: beq a3, a1, .LBB59_3 +; RV32ZBA-NEXT: beq a3, a1, .LBB63_3 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a1, a3 -; RV32ZBA-NEXT: bnez a0, .LBB59_4 -; RV32ZBA-NEXT: .LBB59_2: # %continue +; RV32ZBA-NEXT: bnez a0, .LBB63_4 +; RV32ZBA-NEXT: .LBB63_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB59_3: +; RV32ZBA-NEXT: .LBB63_3: ; RV32ZBA-NEXT: sub a1, a0, a2 ; RV32ZBA-NEXT: sltu a0, a0, a1 -; RV32ZBA-NEXT: beqz a0, .LBB59_2 -; RV32ZBA-NEXT: .LBB59_4: # %overflow +; RV32ZBA-NEXT: beqz a0, .LBB63_2 +; RV32ZBA-NEXT: .LBB63_4: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: usubo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sub a1, a0, a1 -; RV64ZBA-NEXT: bgeu a0, a1, .LBB59_2 +; RV64ZBA-NEXT: bgeu a0, a1, .LBB63_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB59_2: # %continue +; RV64ZBA-NEXT: .LBB63_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3462,11 +3693,11 @@ ; RV32-NEXT: mulh a2, a0, a1 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: srai a0, a0, 31 -; RV32-NEXT: beq a2, a0, .LBB60_2 +; RV32-NEXT: beq a2, a0, .LBB64_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB60_2: # %continue +; RV32-NEXT: .LBB64_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3476,11 +3707,11 @@ ; RV64-NEXT: sext.w a0, a0 ; RV64-NEXT: mul a2, a0, a1 ; RV64-NEXT: mulw a0, a0, a1 -; RV64-NEXT: beq a0, a2, .LBB60_2 +; RV64-NEXT: beq a0, a2, .LBB64_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB60_2: # %continue +; RV64-NEXT: .LBB64_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3489,11 +3720,11 @@ ; RV32ZBA-NEXT: mulh a2, a0, a1 ; RV32ZBA-NEXT: mul a0, a0, a1 ; RV32ZBA-NEXT: srai a0, a0, 31 -; RV32ZBA-NEXT: beq a2, a0, .LBB60_2 +; RV32ZBA-NEXT: beq a2, a0, .LBB64_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB60_2: # %continue +; RV32ZBA-NEXT: .LBB64_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3503,11 +3734,11 @@ ; RV64ZBA-NEXT: sext.w a0, a0 ; RV64ZBA-NEXT: mul a2, a0, a1 ; RV64ZBA-NEXT: mulw a0, a0, a1 -; RV64ZBA-NEXT: beq a0, a2, .LBB60_2 +; RV64ZBA-NEXT: beq a0, a2, .LBB64_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB60_2: # %continue +; RV64ZBA-NEXT: .LBB64_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3573,13 +3804,13 @@ ; RV32-NEXT: xor a0, a0, a1 ; RV32-NEXT: xor a1, t6, a1 ; RV32-NEXT: or a0, a1, a0 -; RV32-NEXT: beqz a0, .LBB61_2 +; RV32-NEXT: beqz a0, .LBB65_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 -; RV32-NEXT: j .LBB61_3 -; RV32-NEXT: .LBB61_2: 
# %continue +; RV32-NEXT: j .LBB65_3 +; RV32-NEXT: .LBB65_2: # %continue ; RV32-NEXT: li a0, 1 -; RV32-NEXT: .LBB61_3: # %overflow +; RV32-NEXT: .LBB65_3: # %overflow ; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -3589,11 +3820,11 @@ ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 -; RV64-NEXT: beq a2, a0, .LBB61_2 +; RV64-NEXT: beq a2, a0, .LBB65_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB61_2: # %continue +; RV64-NEXT: .LBB65_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3646,13 +3877,13 @@ ; RV32ZBA-NEXT: xor a0, a0, a1 ; RV32ZBA-NEXT: xor a1, t6, a1 ; RV32ZBA-NEXT: or a0, a1, a0 -; RV32ZBA-NEXT: beqz a0, .LBB61_2 +; RV32ZBA-NEXT: beqz a0, .LBB65_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 -; RV32ZBA-NEXT: j .LBB61_3 -; RV32ZBA-NEXT: .LBB61_2: # %continue +; RV32ZBA-NEXT: j .LBB65_3 +; RV32ZBA-NEXT: .LBB65_2: # %continue ; RV32ZBA-NEXT: li a0, 1 -; RV32ZBA-NEXT: .LBB61_3: # %overflow +; RV32ZBA-NEXT: .LBB65_3: # %overflow ; RV32ZBA-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBA-NEXT: addi sp, sp, 16 ; RV32ZBA-NEXT: ret @@ -3662,11 +3893,11 @@ ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 -; RV64ZBA-NEXT: beq a2, a0, .LBB61_2 +; RV64ZBA-NEXT: beq a2, a0, .LBB65_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB61_2: # %continue +; RV64ZBA-NEXT: .LBB65_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3723,11 +3954,11 @@ ; RV32-NEXT: xor a0, a0, a1 ; RV32-NEXT: xor a1, t4, a1 ; RV32-NEXT: or a0, a1, a0 -; RV32-NEXT: beqz a0, .LBB62_2 +; RV32-NEXT: beqz a0, .LBB66_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB62_2: # %continue +; RV32-NEXT: .LBB66_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3737,11 +3968,11 @@ ; RV64-NEXT: mulh a2, a0, a1 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: srai a0, a0, 63 -; RV64-NEXT: beq a2, a0, .LBB62_2 +; RV64-NEXT: beq a2, a0, .LBB66_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB62_2: # %continue +; RV64-NEXT: .LBB66_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3785,11 +4016,11 @@ ; RV32ZBA-NEXT: xor a0, a0, a1 ; RV32ZBA-NEXT: xor a1, t4, a1 ; RV32ZBA-NEXT: or a0, a1, a0 -; RV32ZBA-NEXT: beqz a0, .LBB62_2 +; RV32ZBA-NEXT: beqz a0, .LBB66_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB62_2: # %continue +; RV32ZBA-NEXT: .LBB66_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3799,11 +4030,11 @@ ; RV64ZBA-NEXT: mulh a2, a0, a1 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srai a0, a0, 63 -; RV64ZBA-NEXT: beq a2, a0, .LBB62_2 +; RV64ZBA-NEXT: beq a2, a0, .LBB66_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB62_2: # %continue +; RV64ZBA-NEXT: .LBB66_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3823,11 +4054,11 @@ ; RV32-LABEL: umulo.br.i32: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mulhu a0, a0, a1 -; RV32-NEXT: beqz a0, .LBB63_2 +; RV32-NEXT: beqz a0, .LBB67_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB63_2: # %continue +; RV32-NEXT: .LBB67_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; @@ -3837,22 +4068,22 @@ ; RV64-NEXT: slli a0, 
a0, 32 ; RV64-NEXT: mulhu a0, a0, a1 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: beqz a0, .LBB63_2 +; RV64-NEXT: beqz a0, .LBB67_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB63_2: # %continue +; RV64-NEXT: .LBB67_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: umulo.br.i32: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mulhu a0, a0, a1 -; RV32ZBA-NEXT: beqz a0, .LBB63_2 +; RV32ZBA-NEXT: beqz a0, .LBB67_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB63_2: # %continue +; RV32ZBA-NEXT: .LBB67_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3862,11 +4093,11 @@ ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: mul a0, a0, a1 ; RV64ZBA-NEXT: srli a0, a0, 32 -; RV64ZBA-NEXT: beqz a0, .LBB63_2 +; RV64ZBA-NEXT: beqz a0, .LBB67_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB63_2: # %continue +; RV64ZBA-NEXT: .LBB67_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3901,22 +4132,22 @@ ; RV32-NEXT: snez a0, a0 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: or a0, a0, a4 -; RV32-NEXT: beqz a0, .LBB64_2 +; RV32-NEXT: beqz a0, .LBB68_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB64_2: # %continue +; RV32-NEXT: .LBB68_2: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: mulhu a0, a0, a1 -; RV64-NEXT: beqz a0, .LBB64_2 +; RV64-NEXT: beqz a0, .LBB68_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB64_2: # %continue +; RV64-NEXT: .LBB68_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -3938,22 +4169,22 @@ ; RV32ZBA-NEXT: snez a0, a0 ; RV32ZBA-NEXT: or a0, a1, a0 ; RV32ZBA-NEXT: or a0, a0, a4 -; RV32ZBA-NEXT: beqz a0, .LBB64_2 +; RV32ZBA-NEXT: beqz a0, .LBB68_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB64_2: # %continue +; RV32ZBA-NEXT: .LBB68_2: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: mulhu a0, a0, a1 -; RV64ZBA-NEXT: beqz a0, .LBB64_2 +; RV64ZBA-NEXT: beqz a0, .LBB68_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB64_2: # %continue +; RV64ZBA-NEXT: .LBB68_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry: @@ -3976,26 +4207,26 @@ ; RV32-NEXT: sltu a0, a2, a0 ; RV32-NEXT: add a2, a1, a1 ; RV32-NEXT: add a2, a2, a0 -; RV32-NEXT: beq a2, a1, .LBB65_2 +; RV32-NEXT: beq a2, a1, .LBB69_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: sltu a0, a2, a1 -; RV32-NEXT: .LBB65_2: # %entry -; RV32-NEXT: beqz a0, .LBB65_4 +; RV32-NEXT: .LBB69_2: # %entry +; RV32-NEXT: beqz a0, .LBB69_4 ; RV32-NEXT: # %bb.3: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret -; RV32-NEXT: .LBB65_4: # %continue +; RV32-NEXT: .LBB69_4: # %continue ; RV32-NEXT: li a0, 1 ; RV32-NEXT: ret ; ; RV64-LABEL: umulo2.br.i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: add a1, a0, a0 -; RV64-NEXT: bgeu a1, a0, .LBB65_2 +; RV64-NEXT: bgeu a1, a0, .LBB69_2 ; RV64-NEXT: # %bb.1: # %overflow ; RV64-NEXT: li a0, 0 ; RV64-NEXT: ret -; RV64-NEXT: .LBB65_2: # %continue +; RV64-NEXT: .LBB69_2: # %continue ; RV64-NEXT: li a0, 1 ; RV64-NEXT: ret ; @@ -4005,26 +4236,26 @@ ; RV32ZBA-NEXT: sltu a0, a2, a0 ; RV32ZBA-NEXT: add a2, a1, a1 ; RV32ZBA-NEXT: add a2, a2, a0 -; 
RV32ZBA-NEXT: beq a2, a1, .LBB65_2 +; RV32ZBA-NEXT: beq a2, a1, .LBB69_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: sltu a0, a2, a1 -; RV32ZBA-NEXT: .LBB65_2: # %entry -; RV32ZBA-NEXT: beqz a0, .LBB65_4 +; RV32ZBA-NEXT: .LBB69_2: # %entry +; RV32ZBA-NEXT: beqz a0, .LBB69_4 ; RV32ZBA-NEXT: # %bb.3: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret -; RV32ZBA-NEXT: .LBB65_4: # %continue +; RV32ZBA-NEXT: .LBB69_4: # %continue ; RV32ZBA-NEXT: li a0, 1 ; RV32ZBA-NEXT: ret ; ; RV64ZBA-LABEL: umulo2.br.i64: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: add a1, a0, a0 -; RV64ZBA-NEXT: bgeu a1, a0, .LBB65_2 +; RV64ZBA-NEXT: bgeu a1, a0, .LBB69_2 ; RV64ZBA-NEXT: # %bb.1: # %overflow ; RV64ZBA-NEXT: li a0, 0 ; RV64ZBA-NEXT: ret -; RV64ZBA-NEXT: .LBB65_2: # %continue +; RV64ZBA-NEXT: .LBB69_2: # %continue ; RV64ZBA-NEXT: li a0, 1 ; RV64ZBA-NEXT: ret entry:
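Note (illustration only, not part of the diff): the lowering change above relies on the identity that, for an unsigned value X and a constant C, the wrapped sum X + C is unsigned-less-than C exactly when the addition overflowed; that is what lets the addi/addiw result feed sltiu or sltu directly in the checks above without keeping X live. A minimal standalone C++ sketch of that identity, with illustrative names:

#include <cassert>
#include <cstdint>
#include <limits>

// Overflow check in the form the patch emits: compare the wrapped sum
// against the constant instead of against the original operand X.
static bool uaddOverflows(uint32_t X, uint32_t C) {
  uint32_t Sum = X + C; // wraps modulo 2^32, like the hardware add
  return Sum < C;       // (X + C) < C  <=>  the add wrapped around
}

int main() {
  const uint32_t Max = std::numeric_limits<uint32_t>::max();
  assert(uaddOverflows(Max, 2));      // Max + 2 wraps to 1, and 1 < 2
  assert(!uaddOverflows(Max - 2, 2)); // no wrap: result is Max, not < 2
  assert(!uaddOverflows(7, 0));       // adding 0 can never overflow
  // Cross-check a small range against the exact widened 64-bit sum.
  for (uint64_t X = Max - 8; X <= Max; ++X)
    for (uint32_t C = 0; C < 8; ++C)
      assert(uaddOverflows(static_cast<uint32_t>(X), C) == (X + C > Max));
  return 0;
}

For constants that fit in a 12-bit immediate this is exactly the addi + sltiu pair the regenerated checks expect (e.g. uaddo.i64.constant on RV64); larger constants such as 2049 still need the constant materialized and use sltu.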