diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4296,6 +4296,15 @@
     }
 
     SDValue TargetCC = DAG.getCondCode(CCVal);
+
+    if (isa<ConstantSDNode>(TrueV) && !isa<ConstantSDNode>(FalseV)) {
+      // (select (setcc lhs, rhs, CC), constant, falsev)
+      // -> (select (setcc lhs, rhs, InverseCC), falsev, constant)
+      std::swap(TrueV, FalseV);
+      TargetCC =
+          DAG.getCondCode(ISD::getSetCCInverse(CCVal, LHS.getValueType()));
+    }
+
     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
     return DAG.getNode(RISCVISD::SELECT_CC, DL, VT, Ops);
   }
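Note (illustration, not part of the patch): the new canonicalization fires when only the true operand of the select is a constant, which is exactly the shape of the fcvt_wu_*_multiple_use tests updated below (IR sketch reconstructed from the check lines; names illustrative):

  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a

Previously the constant 1 sat in the true operand of SELECT_CC, so codegen materialized it before the branch (li a0, 1) and needed an extra mv of the non-constant value on the other path. Swapping the operands and inverting the condition code (beqz becomes bnez) sinks the li into the one block that uses it and deletes the mv, making the sequence one instruction shorter and freeing a register, as the updated CHECK lines show.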
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -292,21 +292,22 @@
 ; RV32I-NEXT: and a0, s2, a0
 ; RV32I-NEXT: mv a1, s3
 ; RV32I-NEXT: call __mulsi3@plt
-; RV32I-NEXT: li a1, 32
-; RV32I-NEXT: beqz s2, .LBB3_2
+; RV32I-NEXT: bnez s2, .LBB3_3
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: srli a0, a0, 27
-; RV32I-NEXT: add a0, s4, a0
-; RV32I-NEXT: lbu a1, 0(a0)
+; RV32I-NEXT: li a0, 32
+; RV32I-NEXT: beqz s0, .LBB3_4
 ; RV32I-NEXT: .LBB3_2:
-; RV32I-NEXT: bnez s0, .LBB3_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi a0, a1, 32
-; RV32I-NEXT: j .LBB3_5
-; RV32I-NEXT: .LBB3_4:
 ; RV32I-NEXT: srli a0, s1, 27
 ; RV32I-NEXT: add a0, s4, a0
 ; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: j .LBB3_5
+; RV32I-NEXT: .LBB3_3:
+; RV32I-NEXT: srli a0, a0, 27
+; RV32I-NEXT: add a0, s4, a0
+; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: bnez s0, .LBB3_2
+; RV32I-NEXT: .LBB3_4:
+; RV32I-NEXT: addi a0, a0, 32
 ; RV32I-NEXT: .LBB3_5:
 ; RV32I-NEXT: li a1, 0
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -347,22 +348,11 @@
 ; RV32M-NEXT: addi a2, a2, 1329
 ; RV32M-NEXT: lui a3, %hi(.LCPI3_0)
 ; RV32M-NEXT: addi a3, a3, %lo(.LCPI3_0)
-; RV32M-NEXT: li a4, 32
-; RV32M-NEXT: beqz a1, .LBB3_2
+; RV32M-NEXT: bnez a1, .LBB3_3
 ; RV32M-NEXT: # %bb.1:
-; RV32M-NEXT: neg a4, a1
-; RV32M-NEXT: and a1, a1, a4
-; RV32M-NEXT: mul a1, a1, a2
-; RV32M-NEXT: srli a1, a1, 27
-; RV32M-NEXT: add a1, a3, a1
-; RV32M-NEXT: lbu a4, 0(a1)
+; RV32M-NEXT: li a1, 32
+; RV32M-NEXT: beqz a0, .LBB3_4
 ; RV32M-NEXT: .LBB3_2:
-; RV32M-NEXT: bnez a0, .LBB3_4
-; RV32M-NEXT: # %bb.3:
-; RV32M-NEXT: addi a0, a4, 32
-; RV32M-NEXT: li a1, 0
-; RV32M-NEXT: ret
-; RV32M-NEXT: .LBB3_4:
 ; RV32M-NEXT: neg a1, a0
 ; RV32M-NEXT: and a0, a0, a1
 ; RV32M-NEXT: mul a0, a0, a2
@@ -371,6 +361,18 @@
 ; RV32M-NEXT: lbu a0, 0(a0)
 ; RV32M-NEXT: li a1, 0
 ; RV32M-NEXT: ret
+; RV32M-NEXT: .LBB3_3:
+; RV32M-NEXT: neg a4, a1
+; RV32M-NEXT: and a1, a1, a4
+; RV32M-NEXT: mul a1, a1, a2
+; RV32M-NEXT: srli a1, a1, 27
+; RV32M-NEXT: add a1, a3, a1
+; RV32M-NEXT: lbu a1, 0(a1)
+; RV32M-NEXT: bnez a0, .LBB3_2
+; RV32M-NEXT: .LBB3_4:
+; RV32M-NEXT: addi a0, a1, 32
+; RV32M-NEXT: li a1, 0
+; RV32M-NEXT: ret
 ;
 ; RV64M-LABEL: test_cttz_i64:
 ; RV64M: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -133,11 +133,10 @@
 define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
 ; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
 ; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: fcvt.wu.d a1, fa0, rtz
-; CHECKIFD-NEXT: li a0, 1
-; CHECKIFD-NEXT: beqz a1, .LBB4_2
+; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
+; CHECKIFD-NEXT: bnez a0, .LBB4_2
 ; CHECKIFD-NEXT: # %bb.1:
-; CHECKIFD-NEXT: mv a0, a1
+; CHECKIFD-NEXT: li a0, 1
 ; CHECKIFD-NEXT: .LBB4_2:
 ; CHECKIFD-NEXT: ret
 ;
@@ -146,11 +145,9 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __fixunsdfsi@plt
-; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: li a0, 1
-; RV32I-NEXT: beqz a1, .LBB4_2
+; RV32I-NEXT: bnez a0, .LBB4_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: .LBB4_2:
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
@@ -161,11 +158,9 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __fixunsdfsi@plt
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: beqz a1, .LBB4_2
+; RV64I-NEXT: bnez a0, .LBB4_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: .LBB4_2:
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -121,19 +121,19 @@
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: li a2, 0
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: mv s4, a0
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __fixdfsi@plt
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: bltz s3, .LBB3_2
+; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: lui a0, 524288
+; RV32I-NEXT: bgez s4, .LBB3_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s4, a0
+; RV32I-NEXT: lui s3, 524288
 ; RV32I-NEXT: .LBB3_2: # %start
 ; RV32I-NEXT: blez s2, .LBB3_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s4, a1, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s3, a0, -1
 ; RV32I-NEXT: .LBB3_4: # %start
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
@@ -142,7 +142,7 @@
 ; RV32I-NEXT: call __unorddf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s4
+; RV32I-NEXT: and a0, a0, s3
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -165,14 +165,14 @@
 ; RV64I-NEXT: slli a1, a0, 53
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gedf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: lui s3, 524288
-; RV64I-NEXT: lui s2, 524288
-; RV64I-NEXT: bltz s1, .LBB3_2
+; RV64I-NEXT: bgez s2, .LBB3_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 524288
 ; RV64I-NEXT: .LBB3_2: # %start
 ; RV64I-NEXT: li a0, 527
 ; RV64I-NEXT: slli a0, a0, 31
@@ -181,15 +181,15 @@
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtdf2@plt
 ; RV64I-NEXT: blez a0, .LBB3_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: addiw s2, s3, -1
+; RV64I-NEXT: # %bb.3: # %start
+; RV64I-NEXT: addiw s1, s3, -1
 ; RV64I-NEXT: .LBB3_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unorddf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -237,11 +237,10 @@
 define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
 ; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
 ; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: fcvt.wu.d a1, fa0, rtz
-; CHECKIFD-NEXT: li a0, 1
-; CHECKIFD-NEXT: beqz a1, .LBB5_2
+; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
+; CHECKIFD-NEXT: bnez a0, .LBB5_2
 ; CHECKIFD-NEXT: # %bb.1:
-; CHECKIFD-NEXT: mv a0, a1
+; CHECKIFD-NEXT: li a0, 1
 ; CHECKIFD-NEXT: .LBB5_2:
 ; CHECKIFD-NEXT: ret
 ;
@@ -250,11 +249,9 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __fixunsdfsi@plt
-; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: li a0, 1
-; RV32I-NEXT: beqz a1, .LBB5_2
+; RV32I-NEXT: bnez a0, .LBB5_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: .LBB5_2:
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
@@ -265,11 +262,9 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __fixunsdfsi@plt
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: beqz a1, .LBB5_2
+; RV64I-NEXT: bnez a0, .LBB5_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: .LBB5_2:
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
@@ -358,15 +353,15 @@
 ; RV64I-NEXT: slli a1, a0, 21
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtdf2@plt
-; RV64I-NEXT: bgtz a0, .LBB6_2
+; RV64I-NEXT: blez a0, .LBB6_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, -1
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: j .LBB6_3
+; RV64I-NEXT: .LBB6_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: j .LBB6_3
-; RV64I-NEXT: .LBB6_2:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: .LBB6_3: # %start
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -613,20 +608,20 @@
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: li a2, 0
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: mv s5, a0
+; RV32I-NEXT: mv s6, a0
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __fixdfdi@plt
 ; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: mv s5, a1
 ; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: lui s6, 524288
-; RV32I-NEXT: bltz s5, .LBB12_2
+; RV32I-NEXT: bgez s6, .LBB12_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s6, a1
+; RV32I-NEXT: lui s5, 524288
 ; RV32I-NEXT: .LBB12_2: # %start
 ; RV32I-NEXT: blez s4, .LBB12_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s6, a0, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s5, a0, -1
 ; RV32I-NEXT: .LBB12_4: # %start
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
@@ -635,7 +630,7 @@
 ; RV32I-NEXT: call __unorddf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and s4, a0, s6
+; RV32I-NEXT: and s4, a0, s5
 ; RV32I-NEXT: lui a3, 802304
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
@@ -688,23 +683,21 @@
 ; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: li s3, -1
-; RV64I-NEXT: bltz s2, .LBB12_2
+; RV64I-NEXT: bgez s2, .LBB12_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: j .LBB12_3
-; RV64I-NEXT: .LBB12_2:
 ; RV64I-NEXT: slli s1, s3, 63
-; RV64I-NEXT: .LBB12_3: # %start
+; RV64I-NEXT: .LBB12_2: # %start
 ; RV64I-NEXT: li a0, 543
 ; RV64I-NEXT: slli a0, a0, 53
 ; RV64I-NEXT: addi a1, a0, -1
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtdf2@plt
-; RV64I-NEXT: blez a0, .LBB12_5
-; RV64I-NEXT: # %bb.4:
+; RV64I-NEXT: blez a0, .LBB12_4
+; RV64I-NEXT: # %bb.3: # %start
 ; RV64I-NEXT: srli s1, s3, 1
-; RV64I-NEXT: .LBB12_5: # %start
+; RV64I-NEXT: .LBB12_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unorddf2@plt
@@ -1353,19 +1346,19 @@
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: li a2, 0
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: mv s4, a0
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __fixdfsi@plt
-; RV32I-NEXT: lui s4, 1048568
-; RV32I-NEXT: bltz s3, .LBB26_2
+; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: bgez s4, .LBB26_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s4, a0
+; RV32I-NEXT: lui s3, 1048568
 ; RV32I-NEXT: .LBB26_2: # %start
 ; RV32I-NEXT: blez s2, .LBB26_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: # %bb.3: # %start
 ; RV32I-NEXT: lui a0, 8
-; RV32I-NEXT: addi s4, a0, -1
+; RV32I-NEXT: addi s3, a0, -1
 ; RV32I-NEXT: .LBB26_4: # %start
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
@@ -1374,7 +1367,7 @@
 ; RV32I-NEXT: call __unorddf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s4
+; RV32I-NEXT: and a0, a0, s3
 ; RV32I-NEXT: slli a0, a0, 16
 ; RV32I-NEXT: srai a0, a0, 16
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -1398,13 +1391,13 @@
 ; RV64I-NEXT: slli a1, a0, 53
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gedf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixdfdi@plt
-; RV64I-NEXT: lui s2, 1048568
-; RV64I-NEXT: bltz s1, .LBB26_2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: bgez s2, .LBB26_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 1048568
 ; RV64I-NEXT: .LBB26_2: # %start
 ; RV64I-NEXT: lui a0, 4152
 ; RV64I-NEXT: addiw a0, a0, -1
@@ -1412,16 +1405,16 @@
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtdf2@plt
 ; RV64I-NEXT: blez a0, .LBB26_4
-; RV64I-NEXT: # %bb.3:
+; RV64I-NEXT: # %bb.3: # %start
 ; RV64I-NEXT: lui a0, 8
-; RV64I-NEXT: addiw s2, a0, -1
+; RV64I-NEXT: addiw s1, a0, -1
 ; RV64I-NEXT: .LBB26_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unorddf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: slli a0, a0, 48
 ; RV64I-NEXT: srai a0, a0, 48
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1516,14 +1509,16 @@
 ; RV32I-NEXT: call __fixunsdfsi@plt
 ; RV32I-NEXT: lui a1, 16
 ; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: mv a2, a1
-; RV32I-NEXT: bgtz s3, .LBB28_2
+; RV32I-NEXT: blez s3, .LBB28_2
 ; RV32I-NEXT: # %bb.1: # %start
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: j .LBB28_3
+; RV32I-NEXT: .LBB28_2:
 ; RV32I-NEXT: slti a2, s0, 0
 ; RV32I-NEXT: addi a2, a2, -1
-; RV32I-NEXT: and a2, a2, a0
-; RV32I-NEXT: .LBB28_2: # %start
-; RV32I-NEXT: and a0, a2, a1
+; RV32I-NEXT: and a0, a2, a0
+; RV32I-NEXT: .LBB28_3: # %start
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1553,14 +1548,16 @@
 ; RV64I-NEXT: call __gtdf2@plt
 ; RV64I-NEXT: lui a1, 16
 ; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: mv a2, a1
-; RV64I-NEXT: bgtz a0, .LBB28_2
+; RV64I-NEXT: blez a0, .LBB28_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: j .LBB28_3
+; RV64I-NEXT: .LBB28_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a2, a0, s1
-; RV64I-NEXT: .LBB28_2: # %start
-; RV64I-NEXT: and a0, a2, a1
+; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: .LBB28_3: # %start
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1644,6 +1641,7 @@
 ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: mv s0, a1
 ; RV32I-NEXT: mv s1, a0
 ; RV32I-NEXT: lui a3, 263676
@@ -1655,19 +1653,18 @@
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: li a2, 0
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: mv s4, a0
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __fixdfsi@plt
-; RV32I-NEXT: li a1, -128
-; RV32I-NEXT: bltz s3, .LBB30_2
+; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: bgez s4, .LBB30_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: li s3, -128
 ; RV32I-NEXT: .LBB30_2: # %start
-; RV32I-NEXT: li s3, 127
-; RV32I-NEXT: bgtz s2, .LBB30_4
+; RV32I-NEXT: blez s2, .LBB30_4
 ; RV32I-NEXT: # %bb.3: # %start
-; RV32I-NEXT: mv s3, a1
+; RV32I-NEXT: li s3, 127
 ; RV32I-NEXT: .LBB30_4: # %start
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
@@ -1684,6 +1681,7 @@
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 32
 ; RV32I-NEXT: ret
 ;
@@ -1699,22 +1697,21 @@
 ; RV64I-NEXT: slli a1, a0, 53
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gedf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixdfdi@plt
-; RV64I-NEXT: li s2, -128
-; RV64I-NEXT: bltz s1, .LBB30_2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: bgez s2, .LBB30_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: li s1, -128
 ; RV64I-NEXT: .LBB30_2: # %start
 ; RV64I-NEXT: lui a0, 65919
 ; RV64I-NEXT: slli a1, a0, 34
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtdf2@plt
-; RV64I-NEXT: li s1, 127
-; RV64I-NEXT: bgtz a0, .LBB30_4
+; RV64I-NEXT: blez a0, .LBB30_4
 ; RV64I-NEXT: # %bb.3: # %start
-; RV64I-NEXT: mv s1, s2
+; RV64I-NEXT: li s1, 127
 ; RV64I-NEXT: .LBB30_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
@@ -1816,14 +1813,16 @@
 ; RV32I-NEXT: mv a0, s2
 ; RV32I-NEXT: mv a1, s1
 ; RV32I-NEXT: call __fixunsdfsi@plt
-; RV32I-NEXT: li a1, 255
-; RV32I-NEXT: bgtz s3, .LBB32_2
+; RV32I-NEXT: blez s3, .LBB32_2
 ; RV32I-NEXT: # %bb.1: # %start
+; RV32I-NEXT: li a0, 255
+; RV32I-NEXT: j .LBB32_3
+; RV32I-NEXT: .LBB32_2:
 ; RV32I-NEXT: slti a1, s0, 0
 ; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: and a1, a1, a0
-; RV32I-NEXT: .LBB32_2: # %start
-; RV32I-NEXT: andi a0, a1, 255
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: .LBB32_3: # %start
+; RV32I-NEXT: andi a0, a0, 255
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1850,14 +1849,16 @@
 ; RV64I-NEXT: slli a1, a0, 33
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtdf2@plt
-; RV64I-NEXT: li a1, 255
-; RV64I-NEXT: bgtz a0, .LBB32_2
+; RV64I-NEXT: blez a0, .LBB32_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, 255
+; RV64I-NEXT: j .LBB32_3
+; RV64I-NEXT: .LBB32_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a1, a0, s1
-; RV64I-NEXT: .LBB32_2: # %start
-; RV64I-NEXT: andi a0, a1, 255
+; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: .LBB32_3: # %start
+; RV64I-NEXT: andi a0, a0, 255
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1948,15 +1949,15 @@
 ; RV64I-NEXT: slli a1, a0, 21
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtdf2@plt
-; RV64I-NEXT: bgtz a0, .LBB33_2
+; RV64I-NEXT: blez a0, .LBB33_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, -1
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: j .LBB33_3
+; RV64I-NEXT: .LBB33_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: j .LBB33_3
-; RV64I-NEXT: .LBB33_2:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: .LBB33_3: # %start
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
@@ -2003,19 +2004,19 @@
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: li a2, 0
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: mv s4, a0
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __fixdfsi@plt
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: bltz s3, .LBB34_2
+; RV32I-NEXT: mv s3, a0
+; RV32I-NEXT: lui a0, 524288
+; RV32I-NEXT: bgez s4, .LBB34_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s4, a0
+; RV32I-NEXT: lui s3, 524288
 ; RV32I-NEXT: .LBB34_2: # %start
 ; RV32I-NEXT: blez s2, .LBB34_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s4, a1, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s3, a0, -1
 ; RV32I-NEXT: .LBB34_4: # %start
 ; RV32I-NEXT: mv a0, s1
 ; RV32I-NEXT: mv a1, s0
@@ -2024,7 +2025,7 @@
 ; RV32I-NEXT: call __unorddf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s4
+; RV32I-NEXT: and a0, a0, s3
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2047,14 +2048,14 @@
 ; RV64I-NEXT: slli a1, a0, 53
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gedf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: lui s3, 524288
-; RV64I-NEXT: lui s2, 524288
-; RV64I-NEXT: bltz s1, .LBB34_2
+; RV64I-NEXT: bgez s2, .LBB34_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 524288
 ; RV64I-NEXT: .LBB34_2: # %start
 ; RV64I-NEXT: li a0, 527
 ; RV64I-NEXT: slli a0, a0, 31
@@ -2063,15 +2064,15 @@
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtdf2@plt
 ; RV64I-NEXT: blez a0, .LBB34_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: addiw s2, s3, -1
+; RV64I-NEXT: # %bb.3: # %start
+; RV64I-NEXT: addiw s1, s3, -1
 ; RV64I-NEXT: .LBB34_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unorddf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: sext.w a0, a0
 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
--- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -75,11 +75,10 @@
 define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 ; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
 ; CHECKIF: # %bb.0:
-; CHECKIF-NEXT: fcvt.wu.s a1, fa0, rtz
-; CHECKIF-NEXT: li a0, 1
-; CHECKIF-NEXT: beqz a1, .LBB2_2
+; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
+; CHECKIF-NEXT: bnez a0, .LBB2_2
 ; CHECKIF-NEXT: # %bb.1:
-; CHECKIF-NEXT: mv a0, a1
+; CHECKIF-NEXT: li a0, 1
 ; CHECKIF-NEXT: .LBB2_2:
 ; CHECKIF-NEXT: ret
 ;
@@ -88,11 +87,9 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __fixunssfsi@plt
-; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: li a0, 1
-; RV32I-NEXT: beqz a1, .LBB2_2
+; RV32I-NEXT: bnez a0, .LBB2_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: .LBB2_2:
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
@@ -103,11 +100,9 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __fixunssfsi@plt
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: beqz a1, .LBB2_2
+; RV64I-NEXT: bnez a0, .LBB2_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: .LBB2_2:
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -56,29 +56,29 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 847872
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: mv s1, a0
 ; RV32I-NEXT: lui s3, 524288
-; RV32I-NEXT: lui s2, 524288
-; RV32I-NEXT: bltz s1, .LBB1_2
+; RV32I-NEXT: bgez s2, .LBB1_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui s1, 524288
 ; RV32I-NEXT: .LBB1_2: # %start
 ; RV32I-NEXT: lui a0, 323584
 ; RV32I-NEXT: addi a1, a0, -1
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB1_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s2, s3, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s1, s3, -1
 ; RV32I-NEXT: .LBB1_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: and a0, a0, s1
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -98,29 +98,29 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 847872
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: lui s3, 524288
-; RV64I-NEXT: lui s2, 524288
-; RV64I-NEXT: bltz s1, .LBB1_2
+; RV64I-NEXT: bgez s2, .LBB1_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 524288
 ; RV64I-NEXT: .LBB1_2: # %start
 ; RV64I-NEXT: lui a0, 323584
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: blez a0, .LBB1_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: addiw s2, s3, -1
+; RV64I-NEXT: # %bb.3: # %start
+; RV64I-NEXT: addiw s1, s3, -1
 ; RV64I-NEXT: .LBB1_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -166,11 +166,10 @@
 define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
 ; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
 ; CHECKIF: # %bb.0:
-; CHECKIF-NEXT: fcvt.wu.s a1, fa0, rtz
-; CHECKIF-NEXT: li a0, 1
-; CHECKIF-NEXT: beqz a1, .LBB3_2
+; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
+; CHECKIF-NEXT: bnez a0, .LBB3_2
 ; CHECKIF-NEXT: # %bb.1:
-; CHECKIF-NEXT: mv a0, a1
+; CHECKIF-NEXT: li a0, 1
 ; CHECKIF-NEXT: .LBB3_2:
 ; CHECKIF-NEXT: ret
 ;
@@ -179,11 +178,9 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __fixunssfsi@plt
-; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: li a0, 1
-; RV32I-NEXT: beqz a1, .LBB3_2
+; RV32I-NEXT: bnez a0, .LBB3_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: .LBB3_2:
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
@@ -194,11 +191,9 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __fixunssfsi@plt
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: beqz a1, .LBB3_2
+; RV64I-NEXT: bnez a0, .LBB3_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: .LBB3_2:
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
@@ -275,15 +270,15 @@
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: bgtz a0, .LBB4_2
+; RV64I-NEXT: blez a0, .LBB4_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, -1
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: j .LBB4_3
+; RV64I-NEXT: .LBB4_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: j .LBB4_3
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: .LBB4_3: # %start
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -583,11 +578,11 @@
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfdi@plt
 ; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s3, a1
 ; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: lui s3, 524288
-; RV32I-NEXT: bltz s2, .LBB12_2
+; RV32I-NEXT: bgez s2, .LBB12_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s3, a1
+; RV32I-NEXT: lui s3, 524288
 ; RV32I-NEXT: .LBB12_2: # %start
 ; RV32I-NEXT: lui a0, 389120
 ; RV32I-NEXT: addi s2, a0, -1
@@ -595,7 +590,7 @@
 ; RV32I-NEXT: mv a1, s2
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB12_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: # %bb.3: # %start
 ; RV32I-NEXT: addi s3, s4, -1
 ; RV32I-NEXT: .LBB12_4: # %start
 ; RV32I-NEXT: mv a0, s0
@@ -646,22 +641,20 @@
 ; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: li s3, -1
-; RV64I-NEXT: bltz s2, .LBB12_2
+; RV64I-NEXT: bgez s2, .LBB12_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: j .LBB12_3
-; RV64I-NEXT: .LBB12_2:
 ; RV64I-NEXT: slli s1, s3, 63
-; RV64I-NEXT: .LBB12_3: # %start
+; RV64I-NEXT: .LBB12_2: # %start
 ; RV64I-NEXT: lui a0, 389120
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: blez a0, .LBB12_5
-; RV64I-NEXT: # %bb.4:
+; RV64I-NEXT: blez a0, .LBB12_4
+; RV64I-NEXT: # %bb.3: # %start
 ; RV64I-NEXT: srli s1, s3, 1
-; RV64I-NEXT: .LBB12_5: # %start
+; RV64I-NEXT: .LBB12_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
@@ -1197,29 +1190,29 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 815104
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
-; RV32I-NEXT: lui s2, 1048568
-; RV32I-NEXT: bltz s1, .LBB24_2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: bgez s2, .LBB24_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui s1, 1048568
 ; RV32I-NEXT: .LBB24_2: # %start
 ; RV32I-NEXT: lui a0, 290816
 ; RV32I-NEXT: addi a1, a0, -512
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB24_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: # %bb.3: # %start
 ; RV32I-NEXT: lui a0, 8
-; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: addi s1, a0, -1
 ; RV32I-NEXT: .LBB24_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: and a0, a0, s1
 ; RV32I-NEXT: slli a0, a0, 16
 ; RV32I-NEXT: srai a0, a0, 16
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1239,29 +1232,29 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 815104
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
-; RV64I-NEXT: lui s2, 1048568
-; RV64I-NEXT: bltz s1, .LBB24_2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: bgez s2, .LBB24_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 1048568
 ; RV64I-NEXT: .LBB24_2: # %start
 ; RV64I-NEXT: lui a0, 290816
 ; RV64I-NEXT: addiw a1, a0, -512
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: blez a0, .LBB24_4
-; RV64I-NEXT: # %bb.3:
+; RV64I-NEXT: # %bb.3: # %start
 ; RV64I-NEXT: lui a0, 8
-; RV64I-NEXT: addiw s2, a0, -1
+; RV64I-NEXT: addiw s1, a0, -1
 ; RV64I-NEXT: .LBB24_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: slli a0, a0, 48
 ; RV64I-NEXT: srai a0, a0, 48
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1349,14 +1342,16 @@
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: lui a1, 16
 ; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: mv a2, a1
-; RV32I-NEXT: bgtz a0, .LBB26_2
+; RV32I-NEXT: blez a0, .LBB26_2
 ; RV32I-NEXT: # %bb.1: # %start
+; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: j .LBB26_3
+; RV32I-NEXT: .LBB26_2:
 ; RV32I-NEXT: slti a0, s0, 0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a2, a0, s1
-; RV32I-NEXT: .LBB26_2: # %start
-; RV32I-NEXT: and a0, a2, a1
+; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: .LBB26_3: # %start
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1384,14 +1379,16 @@
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: lui a1, 16
 ; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: mv a2, a1
-; RV64I-NEXT: bgtz a0, .LBB26_2
+; RV64I-NEXT: blez a0, .LBB26_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: j .LBB26_3
+; RV64I-NEXT: .LBB26_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a2, a0, s1
-; RV64I-NEXT: .LBB26_2: # %start
-; RV64I-NEXT: and a0, a2, a1
+; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: .LBB26_3: # %start
+; RV64I-NEXT: and a0, a0, a1
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1477,21 +1474,20 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 798720
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
-; RV32I-NEXT: li s2, -128
-; RV32I-NEXT: bltz s1, .LBB28_2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: bgez s2, .LBB28_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: li s1, -128
 ; RV32I-NEXT: .LBB28_2: # %start
 ; RV32I-NEXT: lui a1, 274400
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
-; RV32I-NEXT: li s1, 127
-; RV32I-NEXT: bgtz a0, .LBB28_4
+; RV32I-NEXT: blez a0, .LBB28_4
 ; RV32I-NEXT: # %bb.3: # %start
-; RV32I-NEXT: mv s1, s2
+; RV32I-NEXT: li s1, 127
 ; RV32I-NEXT: .LBB28_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
@@ -1518,21 +1514,20 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 798720
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
-; RV64I-NEXT: li s2, -128
-; RV64I-NEXT: bltz s1, .LBB28_2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: bgez s2, .LBB28_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: li s1, -128
 ; RV64I-NEXT: .LBB28_2: # %start
 ; RV64I-NEXT: lui a1, 274400
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: li s1, 127
-; RV64I-NEXT: bgtz a0, .LBB28_4
+; RV64I-NEXT: blez a0, .LBB28_4
 ; RV64I-NEXT: # %bb.3: # %start
-; RV64I-NEXT: mv s1, s2
+; RV64I-NEXT: li s1, 127
 ; RV64I-NEXT: .LBB28_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
@@ -1624,14 +1619,16 @@
 ; RV32I-NEXT: lui a1, 276464
 ; RV32I-NEXT: mv a0, s2
 ; RV32I-NEXT: call __gtsf2@plt
-; RV32I-NEXT: li a1, 255
-; RV32I-NEXT: bgtz a0, .LBB30_2
+; RV32I-NEXT: blez a0, .LBB30_2
 ; RV32I-NEXT: # %bb.1: # %start
+; RV32I-NEXT: li a0, 255
+; RV32I-NEXT: j .LBB30_3
+; RV32I-NEXT: .LBB30_2:
 ; RV32I-NEXT: slti a0, s0, 0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a1, a0, s1
-; RV32I-NEXT: .LBB30_2: # %start
-; RV32I-NEXT: andi a0, a1, 255
+; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: .LBB30_3: # %start
+; RV32I-NEXT: andi a0, a0, 255
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1656,14 +1653,16 @@
 ; RV64I-NEXT: lui a1, 276464
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: li a1, 255
-; RV64I-NEXT: bgtz a0, .LBB30_2
+; RV64I-NEXT: blez a0, .LBB30_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, 255
+; RV64I-NEXT: j .LBB30_3
+; RV64I-NEXT: .LBB30_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a1, a0, s1
-; RV64I-NEXT: .LBB30_2: # %start
-; RV64I-NEXT: andi a0, a1, 255
+; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: .LBB30_3: # %start
+; RV64I-NEXT: andi a0, a0, 255
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1742,15 +1741,15 @@
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: bgtz a0, .LBB31_2
+; RV64I-NEXT: blez a0, .LBB31_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, -1
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: j .LBB31_3
+; RV64I-NEXT: .LBB31_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: j .LBB31_3
-; RV64I-NEXT: .LBB31_2:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: .LBB31_3: # %start
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
@@ -1786,29 +1785,29 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 847872
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: mv s1, a0
 ; RV32I-NEXT: lui s3, 524288
-; RV32I-NEXT: lui s2, 524288
-; RV32I-NEXT: bltz s1, .LBB32_2
+; RV32I-NEXT: bgez s2, .LBB32_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui s1, 524288
 ; RV32I-NEXT: .LBB32_2: # %start
 ; RV32I-NEXT: lui a0, 323584
 ; RV32I-NEXT: addi a1, a0, -1
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB32_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s2, s3, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s1, s3, -1
 ; RV32I-NEXT: .LBB32_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: and a0, a0, s1
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1828,29 +1827,29 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 847872
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: lui s3, 524288
-; RV64I-NEXT: lui s2, 524288
-; RV64I-NEXT: bltz s1, .LBB32_2
+; RV64I-NEXT: bgez s2, .LBB32_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 524288
 ; RV64I-NEXT: .LBB32_2: # %start
 ; RV64I-NEXT: lui a0, 323584
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: blez a0, .LBB32_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: addiw s2, s3, -1
+; RV64I-NEXT: # %bb.3: # %start
+; RV64I-NEXT: addiw s1, s3, -1
 ; RV64I-NEXT: .LBB32_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: sext.w a0, a0
 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -112,31 +112,28 @@
 define i32 @fcvt_wu_h_multiple_use(half %x, i32* %y) {
 ; CHECKIZFH-LABEL: fcvt_wu_h_multiple_use:
 ; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: fcvt.wu.h a1, fa0, rtz
-; CHECKIZFH-NEXT: li a0, 1
-; CHECKIZFH-NEXT: beqz a1, .LBB4_2
+; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; CHECKIZFH-NEXT: bnez a0, .LBB4_2
 ; CHECKIZFH-NEXT: # %bb.1:
-; CHECKIZFH-NEXT: mv a0, a1
+; CHECKIZFH-NEXT: li a0, 1
 ; CHECKIZFH-NEXT: .LBB4_2:
 ; CHECKIZFH-NEXT: ret
 ;
 ; RV32IDZFH-LABEL: fcvt_wu_h_multiple_use:
 ; RV32IDZFH: # %bb.0:
-; RV32IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz
-; RV32IDZFH-NEXT: li a0, 1
-; RV32IDZFH-NEXT: beqz a1, .LBB4_2
+; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV32IDZFH-NEXT: bnez a0, .LBB4_2
 ; RV32IDZFH-NEXT: # %bb.1:
-; RV32IDZFH-NEXT: mv a0, a1
+; RV32IDZFH-NEXT: li a0, 1
 ; RV32IDZFH-NEXT: .LBB4_2:
 ; RV32IDZFH-NEXT: ret
 ;
 ; RV64IDZFH-LABEL: fcvt_wu_h_multiple_use:
 ; RV64IDZFH: # %bb.0:
-; RV64IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz
-; RV64IDZFH-NEXT: li a0, 1
-; RV64IDZFH-NEXT: beqz a1, .LBB4_2
+; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV64IDZFH-NEXT: bnez a0, .LBB4_2
 ; RV64IDZFH-NEXT: # %bb.1:
-; RV64IDZFH-NEXT: mv a0, a1
+; RV64IDZFH-NEXT: li a0, 1
 ; RV64IDZFH-NEXT: .LBB4_2:
 ; RV64IDZFH-NEXT: ret
 %a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") strictfp
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -138,29 +138,29 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 815104
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
-; RV32I-NEXT: lui s2, 1048568
-; RV32I-NEXT: bltz s1, .LBB1_2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: bgez s2, .LBB1_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui s1, 1048568
 ; RV32I-NEXT: .LBB1_2: # %start
 ; RV32I-NEXT: lui a0, 290816
 ; RV32I-NEXT: addi a1, a0, -512
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB1_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: # %bb.3: # %start
 ; RV32I-NEXT: lui a0, 8
-; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: addi s1, a0, -1
 ; RV32I-NEXT: .LBB1_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: and a0, a0, s1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -181,29 +181,29 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 815104
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
-; RV64I-NEXT: lui s2, 1048568
-; RV64I-NEXT: bltz s1, .LBB1_2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: bgez s2, .LBB1_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 1048568
 ; RV64I-NEXT: .LBB1_2: # %start
 ; RV64I-NEXT: lui a0, 290816
 ; RV64I-NEXT: addiw a1, a0, -512
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: blez a0, .LBB1_4
-; RV64I-NEXT: # %bb.3:
+; RV64I-NEXT: # %bb.3: # %start
 ; RV64I-NEXT: lui a0, 8
-; RV64I-NEXT: addiw s2, a0, -1
+; RV64I-NEXT: addiw s1, a0, -1
 ; RV64I-NEXT: .LBB1_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -333,7 +333,7 @@
 ; RV32I-NEXT: mv a0, s3
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: bgtz a0, .LBB3_2
-; RV32I-NEXT: # %bb.1: # %start
+; RV32I-NEXT: # %bb.1:
 ; RV32I-NEXT: slti a0, s2, 0
 ; RV32I-NEXT: addi a0, a0, -1
 ; RV32I-NEXT: and s0, a0, s1
@@ -371,7 +371,7 @@
 ; RV64I-NEXT: mv a0, s3
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: bgtz a0, .LBB3_2
-; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: # %bb.1:
 ; RV64I-NEXT: slti a0, s2, 0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and s0, a0, s1
@@ -475,29 +475,29 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 847872
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: mv s1, a0
 ; RV32I-NEXT: lui s3, 524288
-; RV32I-NEXT: lui s2, 524288
-; RV32I-NEXT: bltz s1, .LBB5_2
+; RV32I-NEXT: bgez s2, .LBB5_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui s1, 524288
 ; RV32I-NEXT: .LBB5_2: # %start
 ; RV32I-NEXT: lui a0, 323584
 ; RV32I-NEXT: addi a1, a0, -1
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB5_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s2, s3, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s1, s3, -1
 ; RV32I-NEXT: .LBB5_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: and a0, a0, s1
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -520,29 +520,29 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 847872
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: lui s3, 524288
-; RV64I-NEXT: lui s2, 524288
-; RV64I-NEXT: bltz s1, .LBB5_2
+; RV64I-NEXT: bgez s2, .LBB5_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 524288
 ; RV64I-NEXT: .LBB5_2: # %start
 ; RV64I-NEXT: lui a0, 323584
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: blez a0, .LBB5_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: addiw s2, s3, -1
+; RV64I-NEXT: # %bb.3: # %start
+; RV64I-NEXT: addiw s1, s3, -1
 ; RV64I-NEXT: .LBB5_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -604,31 +604,28 @@
 define i32 @fcvt_wu_h_multiple_use(half %x, i32* %y) nounwind {
 ; CHECKIZFH-LABEL: fcvt_wu_h_multiple_use:
 ; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: fcvt.wu.h a1, fa0, rtz
-; CHECKIZFH-NEXT: li a0, 1
-; CHECKIZFH-NEXT: beqz a1, .LBB7_2
+; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; CHECKIZFH-NEXT: bnez a0, .LBB7_2
 ; CHECKIZFH-NEXT: # %bb.1:
-; CHECKIZFH-NEXT: mv a0, a1
+; CHECKIZFH-NEXT: li a0, 1
 ; CHECKIZFH-NEXT: .LBB7_2:
 ; CHECKIZFH-NEXT: ret
 ;
 ; RV32IDZFH-LABEL: fcvt_wu_h_multiple_use:
 ; RV32IDZFH: # %bb.0:
-; RV32IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz
-; RV32IDZFH-NEXT: li a0, 1
-; RV32IDZFH-NEXT: beqz a1, .LBB7_2
+; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV32IDZFH-NEXT: bnez a0, .LBB7_2
 ; RV32IDZFH-NEXT: # %bb.1:
-; RV32IDZFH-NEXT: mv a0, a1
+; RV32IDZFH-NEXT: li a0, 1
 ; RV32IDZFH-NEXT: .LBB7_2:
 ; RV32IDZFH-NEXT: ret
 ;
 ; RV64IDZFH-LABEL: fcvt_wu_h_multiple_use:
 ; RV64IDZFH: # %bb.0:
-; RV64IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz
-; RV64IDZFH-NEXT: li a0, 1
-; RV64IDZFH-NEXT: beqz a1, .LBB7_2
+; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV64IDZFH-NEXT: bnez a0, .LBB7_2
 ; RV64IDZFH-NEXT: # %bb.1:
-; RV64IDZFH-NEXT: mv a0, a1
+; RV64IDZFH-NEXT: li a0, 1
 ; RV64IDZFH-NEXT: .LBB7_2:
 ; RV64IDZFH-NEXT: ret
 ;
@@ -640,11 +637,9 @@
 ; RV32I-NEXT: srli a0, a0, 16
 ; RV32I-NEXT: call __extendhfsf2@plt
 ; RV32I-NEXT: call __fixunssfsi@plt
-; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: li a0, 1
-; RV32I-NEXT: beqz a1, .LBB7_2
+; RV32I-NEXT: bnez a0, .LBB7_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: mv a0, a1
+; RV32I-NEXT: li a0, 1
 ; RV32I-NEXT: .LBB7_2:
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
@@ -658,11 +653,9 @@
 ; RV64I-NEXT: srli a0, a0, 48
 ; RV64I-NEXT: call __extendhfsf2@plt
 ; RV64I-NEXT: call __fixunssfdi@plt
-; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: beqz a1, .LBB7_2
+; RV64I-NEXT: bnez a0, .LBB7_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: mv a0, a1
+; RV64I-NEXT: li a0, 1
 ; RV64I-NEXT: .LBB7_2:
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
@@ -768,15 +761,15 @@
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: bgtz a0, .LBB8_2
+; RV64I-NEXT: blez a0, .LBB8_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, -1
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: j .LBB8_3
+; RV64I-NEXT: .LBB8_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: j .LBB8_3
-; RV64I-NEXT: .LBB8_2:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: .LBB8_3: # %start
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -958,50 +951,50 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 913408
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: mv s3, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfdi@plt
 ; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a1
 ; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: lui s3, 524288
-; RV32I-NEXT: bltz s2, .LBB10_2
+; RV32I-NEXT: bgez s3, .LBB10_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s3, a1
+; RV32I-NEXT: lui s2, 524288
 ; RV32I-NEXT: .LBB10_2: # %start
 ; RV32I-NEXT: lui a0, 389120
-; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: addi s3, a0, -1
 ; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: mv a1, s2
+; RV32I-NEXT: mv a1, s3
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB10_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s3, s4, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s2, s4, -1
 ; RV32I-NEXT: .LBB10_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and s3, a0, s3
+; RV32I-NEXT: and s2, a0, s2
 ; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: mv a1, s2
+; RV32I-NEXT: mv a1, s3
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: sgtz a0, a0
-; RV32I-NEXT: neg s2, a0
+; RV32I-NEXT: neg s3, a0
 ; RV32I-NEXT: lui a1, 913408
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gesf2@plt
 ; RV32I-NEXT: slti a0, a0, 0
 ; RV32I-NEXT: addi a0, a0, -1
 ; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: or s1, s2, a0
+; RV32I-NEXT: or s1, s3, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
 ; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: mv a1, s3
+; RV32I-NEXT: mv a1, s2
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1028,22 +1021,20 @@
 ; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: li s3, -1
-; RV64I-NEXT: bltz s2, .LBB10_2
+; RV64I-NEXT: bgez s2, .LBB10_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: j .LBB10_3
-; RV64I-NEXT: .LBB10_2:
 ; RV64I-NEXT: slli s1, s3, 63
-; RV64I-NEXT: .LBB10_3: # %start
+; RV64I-NEXT: .LBB10_2: # %start
 ; RV64I-NEXT: lui a0, 389120
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: blez a0, .LBB10_5
-; RV64I-NEXT: # %bb.4:
+; RV64I-NEXT: blez a0, .LBB10_4
+; RV64I-NEXT: # %bb.3: # %start
 ; RV64I-NEXT: srli s1, s3, 1
-; RV64I-NEXT: .LBB10_5: # %start
+; RV64I-NEXT: .LBB10_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
@@ -2262,29 +2253,29 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 815104
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
-; RV32I-NEXT: lui s2, 1048568
-; RV32I-NEXT: bltz s1, .LBB32_2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: bgez s2, .LBB32_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui s1, 1048568
 ; RV32I-NEXT: .LBB32_2: # %start
 ; RV32I-NEXT: lui a0, 290816
 ; RV32I-NEXT: addi a1, a0, -512
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB32_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: # %bb.3: # %start
 ; RV32I-NEXT: lui a0, 8
-; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: addi s1, a0, -1
 ; RV32I-NEXT: .LBB32_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: and a0, a0, s1
 ; RV32I-NEXT: slli a0, a0, 16
 ; RV32I-NEXT: srai a0, a0, 16
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2307,29 +2298,29 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 815104
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
-; RV64I-NEXT: lui s2, 1048568
-; RV64I-NEXT: bltz s1, .LBB32_2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: bgez s2, .LBB32_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 1048568
 ; RV64I-NEXT: .LBB32_2: # %start
 ; RV64I-NEXT: lui a0, 290816
 ; RV64I-NEXT: addiw a1, a0, -512
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: blez a0, .LBB32_4
-; RV64I-NEXT: # %bb.3:
+; RV64I-NEXT: # %bb.3: # %start
 ; RV64I-NEXT: lui a0, 8
-; RV64I-NEXT: addiw s2, a0, -1
+; RV64I-NEXT: addiw s1, a0, -1
 ; RV64I-NEXT: .LBB32_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: slli a0, a0, 48
 ; RV64I-NEXT: srai a0, a0, 48
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -2459,14 +2450,16 @@
 ; RV32I-NEXT: addi a1, a0, -256
 ; RV32I-NEXT: mv a0, s2
 ; RV32I-NEXT: call __gtsf2@plt
-; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: bgtz a0, .LBB34_2
+; RV32I-NEXT: blez a0, .LBB34_2
 ; RV32I-NEXT: # %bb.1: # %start
+; RV32I-NEXT: mv a0, s3
+; RV32I-NEXT: j .LBB34_3
+; RV32I-NEXT: .LBB34_2:
 ; RV32I-NEXT: slti a0, s1, 0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a1, a0, s0
-; RV32I-NEXT: .LBB34_2: # %start
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: and a0, a0, s0
+; RV32I-NEXT: .LBB34_3: # %start
+; RV32I-NEXT: and a0, a0, s3
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2498,14 +2491,16 @@
 ; RV64I-NEXT: addiw a1, a0, -256
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: mv a1, s3
-; RV64I-NEXT: bgtz a0, .LBB34_2
+; RV64I-NEXT: blez a0, .LBB34_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: mv a0, s3
+; RV64I-NEXT: j .LBB34_3
+; RV64I-NEXT: .LBB34_2:
 ; RV64I-NEXT: slti a0, s1, 0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a1, a0, s0
-; RV64I-NEXT: .LBB34_2: # %start
-; RV64I-NEXT: and a0, a1, s3
+; RV64I-NEXT: and a0, a0, s0
+; RV64I-NEXT: .LBB34_3: # %start
+; RV64I-NEXT: and a0, a0, s3
 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -2644,21 +2639,20 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 798720
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
-; RV32I-NEXT: li s2, -128
-; RV32I-NEXT: bltz s1, .LBB36_2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: bgez s2, .LBB36_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: li s1, -128
 ; RV32I-NEXT: .LBB36_2: # %start
 ; RV32I-NEXT: lui a1, 274400
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
-; RV32I-NEXT: li s1, 127
-; RV32I-NEXT: bgtz a0, .LBB36_4
+; RV32I-NEXT: blez a0, .LBB36_4
 ; RV32I-NEXT: # %bb.3: # %start
-; RV32I-NEXT: mv s1, s2
+; RV32I-NEXT: li s1, 127
 ; RV32I-NEXT: .LBB36_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
@@ -2688,21 +2682,20 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 798720
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
-; RV64I-NEXT: li s2, -128
-; RV64I-NEXT: bltz s1, .LBB36_2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: bgez s2, .LBB36_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: li s1, -128
 ; RV64I-NEXT: .LBB36_2: # %start
 ; RV64I-NEXT: lui a1, 274400
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: li s1, 127
-; RV64I-NEXT: bgtz a0, .LBB36_4
+; RV64I-NEXT: blez a0, .LBB36_4
 ; RV64I-NEXT: # %bb.3: # %start
-; RV64I-NEXT: mv s1, s2
+; RV64I-NEXT: li s1, 127
 ; RV64I-NEXT: .LBB36_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
@@ -2837,14 +2830,16 @@
 ; RV32I-NEXT: lui a1, 276464
 ; RV32I-NEXT: mv a0, s2
 ; RV32I-NEXT: call __gtsf2@plt
-; RV32I-NEXT: li a1, 255
-; RV32I-NEXT: bgtz a0, .LBB38_2
+; RV32I-NEXT: blez a0, .LBB38_2
 ; RV32I-NEXT: # %bb.1: # %start
+; RV32I-NEXT: li a0, 255
+; RV32I-NEXT: j .LBB38_3
+; RV32I-NEXT: .LBB38_2:
 ; RV32I-NEXT: slti a0, s0, 0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a1, a0, s1
-; RV32I-NEXT: .LBB38_2: # %start
-; RV32I-NEXT: andi a0, a1, 255
+; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: .LBB38_3: # %start
+; RV32I-NEXT: andi a0, a0, 255
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -2872,14 +2867,16 @@
 ; RV64I-NEXT: lui a1, 276464
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: li a1, 255
-; RV64I-NEXT: bgtz a0, .LBB38_2
+; RV64I-NEXT: blez a0, .LBB38_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, 255
+; RV64I-NEXT: j .LBB38_3
+; RV64I-NEXT: .LBB38_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a1, a0, s1
-; RV64I-NEXT: .LBB38_2: # %start
-; RV64I-NEXT: andi a0, a1, 255
+; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: .LBB38_3: # %start
+; RV64I-NEXT: andi a0, a0, 255
 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -2987,15 +2984,15 @@
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s2
 ; RV64I-NEXT: call __gtsf2@plt
-; RV64I-NEXT: bgtz a0, .LBB39_2
+; RV64I-NEXT: blez a0, .LBB39_2
 ; RV64I-NEXT: # %bb.1: # %start
+; RV64I-NEXT: li a0, -1
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: j .LBB39_3
+; RV64I-NEXT: .LBB39_2:
 ; RV64I-NEXT: slti a0, s0, 0
 ; RV64I-NEXT: addi a0, a0, -1
 ; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: j .LBB39_3
-; RV64I-NEXT: .LBB39_2:
-; RV64I-NEXT: li a0, -1
-; RV64I-NEXT: srli a0, a0, 32
 ; RV64I-NEXT: .LBB39_3: # %start
 ; RV64I-NEXT: slli a0, a0, 32
 ; RV64I-NEXT: srli a0, a0, 32
@@ -3052,29 +3049,29 @@
 ; RV32I-NEXT: mv s0, a0
 ; RV32I-NEXT: lui a1, 847872
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv s2, a0
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: mv s1, a0
 ; RV32I-NEXT: lui s3, 524288
-; RV32I-NEXT: lui s2, 524288
-; RV32I-NEXT: bltz s1, .LBB40_2
+; RV32I-NEXT: bgez s2, .LBB40_2
 ; RV32I-NEXT: # %bb.1: # %start
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui s1, 524288
 ; RV32I-NEXT: .LBB40_2: # %start
 ; RV32I-NEXT: lui a0, 323584
 ; RV32I-NEXT: addi a1, a0, -1
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __gtsf2@plt
 ; RV32I-NEXT: blez a0, .LBB40_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi s2, s3, -1
+; RV32I-NEXT: # %bb.3: # %start
+; RV32I-NEXT: addi s1, s3, -1
 ; RV32I-NEXT: .LBB40_4: # %start
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: mv a1, s0
 ; RV32I-NEXT: call __unordsf2@plt
 ; RV32I-NEXT: snez a0, a0
 ; RV32I-NEXT: addi a0, a0, -1
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: and a0, a0, s1
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -3097,29 +3094,29 @@
 ; RV64I-NEXT: mv s0, a0
 ; RV64I-NEXT: lui a1, 847872
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv s2, a0
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: mv s1, a0
 ; RV64I-NEXT: lui s3, 524288
-; RV64I-NEXT: lui s2, 524288
-; RV64I-NEXT: bltz s1, .LBB40_2
+; RV64I-NEXT: bgez s2, .LBB40_2
 ; RV64I-NEXT: # %bb.1: # %start
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui s1, 524288
 ; RV64I-NEXT: .LBB40_2: # %start
 ; RV64I-NEXT: lui a0, 323584
 ; RV64I-NEXT: addiw a1, a0, -1
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __gtsf2@plt
 ; RV64I-NEXT: blez a0, .LBB40_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: addiw s2, s3, -1
+; RV64I-NEXT: # %bb.3: # %start
+; RV64I-NEXT: addiw s1, s3, -1
 ; RV64I-NEXT: .LBB40_4: # %start
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: mv a1, s0
 ; RV64I-NEXT: call __unordsf2@plt
 ; RV64I-NEXT: snez a0, a0
 ; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: and a0, a0, s1
 ; RV64I-NEXT: sext.w a0, a0
 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -227,21 +227,22 @@
 ; RV32I-NEXT: and a0, s2, a0
 ; RV32I-NEXT: mv a1, s3
 ; RV32I-NEXT: call __mulsi3@plt
-; RV32I-NEXT: li a1, 32
-; RV32I-NEXT: beqz s2, .LBB3_2
+; RV32I-NEXT: bnez s2, .LBB3_3
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: srli a0, a0, 27
-; RV32I-NEXT: add a0, s4, a0
-; RV32I-NEXT: lbu a1, 0(a0)
+; RV32I-NEXT: li a0, 32
+; RV32I-NEXT: beqz s0, .LBB3_4
 ; RV32I-NEXT: .LBB3_2:
-; RV32I-NEXT: bnez s0, .LBB3_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: addi a0, a1, 32
-; RV32I-NEXT: j .LBB3_5
-; RV32I-NEXT: .LBB3_4:
 ; RV32I-NEXT: srli a0, s1, 27
 ; RV32I-NEXT: add a0, s4, a0
 ; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: j .LBB3_5
+; RV32I-NEXT: .LBB3_3:
+; RV32I-NEXT: srli a0, a0, 27
+; RV32I-NEXT: add a0, s4, a0
+; RV32I-NEXT: lbu a0, 0(a0)
+; RV32I-NEXT: bnez s0, .LBB3_2
+; RV32I-NEXT: .LBB3_4:
+; RV32I-NEXT: addi a0, a0, 32
 ; RV32I-NEXT: .LBB3_5:
 ; RV32I-NEXT: li a1, 0
 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload