diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1031,6 +1031,7 @@ // Jumps are expensive, compared to logic setJumpIsExpensive(); + setTargetDAGCombine(ISD::SELECT); setTargetDAGCombine(ISD::ADD); setTargetDAGCombine(ISD::SUB); setTargetDAGCombine(ISD::AND); @@ -8383,6 +8384,22 @@ break; } + case ISD::SELECT: { + SDValue Cond = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); + EVT VT = N->getValueType(0); + SDLoc DL(N); + + auto *FalseV = dyn_cast<ConstantSDNode>(N2); + if (!FalseV || !FalseV->isZero()) + break; + + // select Cond, X, 0 --> and (sext Cond), X + if (VT != MVT::i1) + Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond); + return DAG.getNode(ISD::AND, DL, VT, Cond, N1); + } case RISCVISD::BR_CC: { SDValue LHS = N->getOperand(1); SDValue RHS = N->getOperand(2); diff --git a/llvm/test/CodeGen/RISCV/alu64.ll b/llvm/test/CodeGen/RISCV/alu64.ll --- a/llvm/test/CodeGen/RISCV/alu64.ll +++ b/llvm/test/CodeGen/RISCV/alu64.ll @@ -58,13 +58,9 @@ ; ; RV32I-LABEL: sltiu: ; RV32I: # %bb.0: -; RV32I-NEXT: beqz a1, .LBB2_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: ret -; RV32I-NEXT: .LBB2_2: ; RV32I-NEXT: sltiu a0, a0, 3 +; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret %1 = icmp ult i64 %a, 3 diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll --- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll @@ -29,9 +29,9 @@ ; RV32IF-NEXT: sltu a4, a0, a3 ; RV32IF-NEXT: bnez a4, .LBB0_4 ; RV32IF-NEXT: .LBB0_3: # %entry -; RV32IF-NEXT: li a1, 0 ; RV32IF-NEXT: mv a0, a3 ; RV32IF-NEXT: .LBB0_4: # %entry +; RV32IF-NEXT: and a1, a4, a1 ; RV32IF-NEXT: li a3, -1 ; RV32IF-NEXT: beq a1, a3, .LBB0_6 ; RV32IF-NEXT: # %bb.5: # %entry @@ -112,18 +112,14 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixunsdfdi@plt -; RV32IF-NEXT: beqz a1, .LBB1_2 +; RV32IF-NEXT: seqz a1, a1 +; RV32IF-NEXT: addi a2, a0, 1 +; RV32IF-NEXT: snez a2, a2 +; RV32IF-NEXT: and a1, a1, a2 +; RV32IF-NEXT: bnez a1, .LBB1_2 ; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: beqz a1, .LBB1_3 -; RV32IF-NEXT: j .LBB1_4 -; RV32IF-NEXT: .LBB1_2: -; RV32IF-NEXT: addi a1, a0, 1 -; RV32IF-NEXT: snez a1, a1 -; RV32IF-NEXT: bnez a1, .LBB1_4 -; RV32IF-NEXT: .LBB1_3: # %entry ; RV32IF-NEXT: li a0, -1 -; RV32IF-NEXT: .LBB1_4: # %entry +; RV32IF-NEXT: .LBB1_2: # %entry ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret @@ -190,20 +186,18 @@ ; RV32IF-NEXT: snez a2, a2 ; RV32IF-NEXT: bnez a2, .LBB2_4 ; RV32IF-NEXT: .LBB2_3: # %entry -; RV32IF-NEXT: li a1, 0 ; RV32IF-NEXT: li a0, -1 ; RV32IF-NEXT: .LBB2_4: # %entry +; RV32IF-NEXT: and a1, a2, a1 ; RV32IF-NEXT: beqz a1, .LBB2_6 ; RV32IF-NEXT: # %bb.5: # %entry ; RV32IF-NEXT: sgtz a1, a1 -; RV32IF-NEXT: beqz a1, .LBB2_7 -; RV32IF-NEXT: j .LBB2_8 +; RV32IF-NEXT: j .LBB2_7 ; RV32IF-NEXT: .LBB2_6: ; RV32IF-NEXT: snez a1, a0 -; RV32IF-NEXT: bnez a1, .LBB2_8 ; RV32IF-NEXT: .LBB2_7: # %entry -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: .LBB2_8: # %entry +; RV32IF-NEXT: neg a1, a1 +; RV32IF-NEXT: and a0, a1, a0 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret @@ -221,10 +215,9 @@ ;
RV64IF-NEXT: # %bb.1: # %entry ; RV64IF-NEXT: mv a0, a1 ; RV64IF-NEXT: .LBB2_2: # %entry -; RV64IF-NEXT: bgtz a0, .LBB2_4 -; RV64IF-NEXT: # %bb.3: # %entry -; RV64IF-NEXT: li a0, 0 -; RV64IF-NEXT: .LBB2_4: # %entry +; RV64IF-NEXT: sgtz a1, a0 +; RV64IF-NEXT: neg a1, a1 +; RV64IF-NEXT: and a0, a1, a0 ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret @@ -243,16 +236,13 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: li a1, -1 ; RV64IFD-NEXT: srli a1, a1, 32 -; RV64IFD-NEXT: bge a0, a1, .LBB2_3 +; RV64IFD-NEXT: blt a0, a1, .LBB2_2 ; RV64IFD-NEXT: # %bb.1: # %entry -; RV64IFD-NEXT: blez a0, .LBB2_4 -; RV64IFD-NEXT: .LBB2_2: # %entry -; RV64IFD-NEXT: ret -; RV64IFD-NEXT: .LBB2_3: # %entry ; RV64IFD-NEXT: mv a0, a1 -; RV64IFD-NEXT: bgtz a0, .LBB2_2 -; RV64IFD-NEXT: .LBB2_4: # %entry -; RV64IFD-NEXT: li a0, 0 +; RV64IFD-NEXT: .LBB2_2: # %entry +; RV64IFD-NEXT: sgtz a1, a0 +; RV64IFD-NEXT: neg a1, a1 +; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret entry: %conv = fptosi double %x to i64 @@ -343,16 +333,13 @@ ; RV64-NEXT: fcvt.l.s a0, fa0, rtz ; RV64-NEXT: li a1, -1 ; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: bge a0, a1, .LBB5_3 +; RV64-NEXT: blt a0, a1, .LBB5_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: blez a0, .LBB5_4 -; RV64-NEXT: .LBB5_2: # %entry -; RV64-NEXT: ret -; RV64-NEXT: .LBB5_3: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: bgtz a0, .LBB5_2 -; RV64-NEXT: .LBB5_4: # %entry -; RV64-NEXT: li a0, 0 +; RV64-NEXT: .LBB5_2: # %entry +; RV64-NEXT: sgtz a1, a0 +; RV64-NEXT: neg a1, a1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ret entry: %conv = fptosi float %x to i64 @@ -385,9 +372,9 @@ ; RV32-NEXT: sltu a4, a0, a3 ; RV32-NEXT: bnez a4, .LBB6_4 ; RV32-NEXT: .LBB6_3: # %entry -; RV32-NEXT: li a1, 0 ; RV32-NEXT: mv a0, a3 ; RV32-NEXT: .LBB6_4: # %entry +; RV32-NEXT: and a1, a4, a1 ; RV32-NEXT: li a3, -1 ; RV32-NEXT: beq a1, a3, .LBB6_6 ; RV32-NEXT: # %bb.5: # %entry @@ -446,18 +433,14 @@ ; RV32-NEXT: fmv.x.w a0, fa0 ; RV32-NEXT: call __extendhfsf2@plt ; RV32-NEXT: call __fixunssfdi@plt -; RV32-NEXT: beqz a1, .LBB7_2 +; RV32-NEXT: seqz a1, a1 +; RV32-NEXT: addi a2, a0, 1 +; RV32-NEXT: snez a2, a2 +; RV32-NEXT: and a1, a1, a2 +; RV32-NEXT: bnez a1, .LBB7_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a1, .LBB7_3 -; RV32-NEXT: j .LBB7_4 -; RV32-NEXT: .LBB7_2: -; RV32-NEXT: addi a1, a0, 1 -; RV32-NEXT: snez a1, a1 -; RV32-NEXT: bnez a1, .LBB7_4 -; RV32-NEXT: .LBB7_3: # %entry ; RV32-NEXT: li a0, -1 -; RV32-NEXT: .LBB7_4: # %entry +; RV32-NEXT: .LBB7_2: # %entry ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -508,20 +491,18 @@ ; RV32-NEXT: snez a2, a2 ; RV32-NEXT: bnez a2, .LBB8_4 ; RV32-NEXT: .LBB8_3: # %entry -; RV32-NEXT: li a1, 0 ; RV32-NEXT: li a0, -1 ; RV32-NEXT: .LBB8_4: # %entry +; RV32-NEXT: and a1, a2, a1 ; RV32-NEXT: beqz a1, .LBB8_6 ; RV32-NEXT: # %bb.5: # %entry ; RV32-NEXT: sgtz a1, a1 -; RV32-NEXT: beqz a1, .LBB8_7 -; RV32-NEXT: j .LBB8_8 +; RV32-NEXT: j .LBB8_7 ; RV32-NEXT: .LBB8_6: ; RV32-NEXT: snez a1, a0 -; RV32-NEXT: bnez a1, .LBB8_8 ; RV32-NEXT: .LBB8_7: # %entry -; RV32-NEXT: li a0, 0 -; RV32-NEXT: .LBB8_8: # %entry +; RV32-NEXT: neg a1, a1 +; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -541,10 +522,9 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB8_2: # %entry -; RV64-NEXT: bgtz a0, .LBB8_4 -; RV64-NEXT: # %bb.3: # %entry -; 
RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB8_4: # %entry +; RV64-NEXT: sgtz a1, a0 +; RV64-NEXT: neg a1, a1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -730,10 +710,9 @@ ; RV32IF-NEXT: # %bb.1: # %entry ; RV32IF-NEXT: mv a0, a1 ; RV32IF-NEXT: .LBB11_2: # %entry -; RV32IF-NEXT: bgtz a0, .LBB11_4 -; RV32IF-NEXT: # %bb.3: # %entry -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: .LBB11_4: # %entry +; RV32IF-NEXT: sgtz a1, a0 +; RV32IF-NEXT: neg a1, a1 +; RV32IF-NEXT: and a0, a1, a0 ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret @@ -751,10 +730,9 @@ ; RV64IF-NEXT: # %bb.1: # %entry ; RV64IF-NEXT: mv a0, a1 ; RV64IF-NEXT: .LBB11_2: # %entry -; RV64IF-NEXT: bgtz a0, .LBB11_4 -; RV64IF-NEXT: # %bb.3: # %entry -; RV64IF-NEXT: li a0, 0 -; RV64IF-NEXT: .LBB11_4: # %entry +; RV64IF-NEXT: sgtz a1, a0 +; RV64IF-NEXT: neg a1, a1 +; RV64IF-NEXT: and a0, a1, a0 ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret @@ -764,16 +742,13 @@ ; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz ; RV32IFD-NEXT: lui a1, 16 ; RV32IFD-NEXT: addi a1, a1, -1 -; RV32IFD-NEXT: bge a0, a1, .LBB11_3 +; RV32IFD-NEXT: blt a0, a1, .LBB11_2 ; RV32IFD-NEXT: # %bb.1: # %entry -; RV32IFD-NEXT: blez a0, .LBB11_4 -; RV32IFD-NEXT: .LBB11_2: # %entry -; RV32IFD-NEXT: ret -; RV32IFD-NEXT: .LBB11_3: # %entry ; RV32IFD-NEXT: mv a0, a1 -; RV32IFD-NEXT: bgtz a0, .LBB11_2 -; RV32IFD-NEXT: .LBB11_4: # %entry -; RV32IFD-NEXT: li a0, 0 +; RV32IFD-NEXT: .LBB11_2: # %entry +; RV32IFD-NEXT: sgtz a1, a0 +; RV32IFD-NEXT: neg a1, a1 +; RV32IFD-NEXT: and a0, a1, a0 ; RV32IFD-NEXT: ret ; ; RV64IFD-LABEL: ustest_f64i16: @@ -781,16 +756,13 @@ ; RV64IFD-NEXT: fcvt.w.d a0, fa0, rtz ; RV64IFD-NEXT: lui a1, 16 ; RV64IFD-NEXT: addiw a1, a1, -1 -; RV64IFD-NEXT: bge a0, a1, .LBB11_3 +; RV64IFD-NEXT: blt a0, a1, .LBB11_2 ; RV64IFD-NEXT: # %bb.1: # %entry -; RV64IFD-NEXT: blez a0, .LBB11_4 -; RV64IFD-NEXT: .LBB11_2: # %entry -; RV64IFD-NEXT: ret -; RV64IFD-NEXT: .LBB11_3: # %entry ; RV64IFD-NEXT: mv a0, a1 -; RV64IFD-NEXT: bgtz a0, .LBB11_2 -; RV64IFD-NEXT: .LBB11_4: # %entry -; RV64IFD-NEXT: li a0, 0 +; RV64IFD-NEXT: .LBB11_2: # %entry +; RV64IFD-NEXT: sgtz a1, a0 +; RV64IFD-NEXT: neg a1, a1 +; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret entry: %conv = fptosi double %x to i32 @@ -886,16 +858,13 @@ ; RV32-NEXT: fcvt.w.s a0, fa0, rtz ; RV32-NEXT: lui a1, 16 ; RV32-NEXT: addi a1, a1, -1 -; RV32-NEXT: bge a0, a1, .LBB14_3 +; RV32-NEXT: blt a0, a1, .LBB14_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: blez a0, .LBB14_4 -; RV32-NEXT: .LBB14_2: # %entry -; RV32-NEXT: ret -; RV32-NEXT: .LBB14_3: # %entry ; RV32-NEXT: mv a0, a1 -; RV32-NEXT: bgtz a0, .LBB14_2 -; RV32-NEXT: .LBB14_4: # %entry -; RV32-NEXT: li a0, 0 +; RV32-NEXT: .LBB14_2: # %entry +; RV32-NEXT: sgtz a1, a0 +; RV32-NEXT: neg a1, a1 +; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: ustest_f32i16: @@ -903,16 +872,13 @@ ; RV64-NEXT: fcvt.w.s a0, fa0, rtz ; RV64-NEXT: lui a1, 16 ; RV64-NEXT: addiw a1, a1, -1 -; RV64-NEXT: bge a0, a1, .LBB14_3 +; RV64-NEXT: blt a0, a1, .LBB14_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: blez a0, .LBB14_4 -; RV64-NEXT: .LBB14_2: # %entry -; RV64-NEXT: ret -; RV64-NEXT: .LBB14_3: # %entry ; RV64-NEXT: mv a0, a1 -; RV64-NEXT: bgtz a0, .LBB14_2 -; RV64-NEXT: .LBB14_4: # %entry -; RV64-NEXT: li a0, 0 +; RV64-NEXT: .LBB14_2: # %entry +; RV64-NEXT: sgtz a1, a0 +; RV64-NEXT: neg a1, a1 +; RV64-NEXT: and a0, 
a1, a0 ; RV64-NEXT: ret entry: %conv = fptosi float %x to i32 @@ -1044,10 +1010,9 @@ ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a1 ; RV32-NEXT: .LBB17_2: # %entry -; RV32-NEXT: bgtz a0, .LBB17_4 -; RV32-NEXT: # %bb.3: # %entry -; RV32-NEXT: li a0, 0 -; RV32-NEXT: .LBB17_4: # %entry +; RV32-NEXT: sgtz a1, a0 +; RV32-NEXT: neg a1, a1 +; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret @@ -1067,10 +1032,9 @@ ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB17_2: # %entry -; RV64-NEXT: bgtz a0, .LBB17_4 -; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB17_4: # %entry +; RV64-NEXT: sgtz a1, a0 +; RV64-NEXT: neg a1, a1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1118,32 +1082,31 @@ ; RV32IF-NEXT: slti a7, a2, 0 ; RV32IF-NEXT: .LBB18_4: # %entry ; RV32IF-NEXT: li a6, -1 -; RV32IF-NEXT: beqz a7, .LBB18_7 +; RV32IF-NEXT: bnez a7, .LBB18_6 ; RV32IF-NEXT: # %bb.5: # %entry -; RV32IF-NEXT: beq a1, a4, .LBB18_8 +; RV32IF-NEXT: li a0, -1 +; RV32IF-NEXT: mv a1, a5 ; RV32IF-NEXT: .LBB18_6: # %entry -; RV32IF-NEXT: sltu a4, a4, a1 -; RV32IF-NEXT: and a3, a3, a2 +; RV32IF-NEXT: and a5, a7, a2 +; RV32IF-NEXT: and a3, a7, a3 +; RV32IF-NEXT: beq a1, a4, .LBB18_8 +; RV32IF-NEXT: # %bb.7: # %entry +; RV32IF-NEXT: sltu a2, a4, a1 +; RV32IF-NEXT: and a3, a3, a5 ; RV32IF-NEXT: bne a3, a6, .LBB18_9 ; RV32IF-NEXT: j .LBB18_10 -; RV32IF-NEXT: .LBB18_7: # %entry -; RV32IF-NEXT: li a2, 0 -; RV32IF-NEXT: li a3, 0 -; RV32IF-NEXT: li a0, -1 -; RV32IF-NEXT: mv a1, a5 -; RV32IF-NEXT: bne a1, a4, .LBB18_6 ; RV32IF-NEXT: .LBB18_8: -; RV32IF-NEXT: snez a4, a0 -; RV32IF-NEXT: and a3, a3, a2 +; RV32IF-NEXT: snez a2, a0 +; RV32IF-NEXT: and a3, a3, a5 ; RV32IF-NEXT: beq a3, a6, .LBB18_10 ; RV32IF-NEXT: .LBB18_9: # %entry -; RV32IF-NEXT: slt a4, a6, a2 +; RV32IF-NEXT: slt a2, a6, a5 ; RV32IF-NEXT: .LBB18_10: # %entry -; RV32IF-NEXT: bnez a4, .LBB18_12 +; RV32IF-NEXT: bnez a2, .LBB18_12 ; RV32IF-NEXT: # %bb.11: # %entry -; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: lui a1, 524288 ; RV32IF-NEXT: .LBB18_12: # %entry +; RV32IF-NEXT: and a0, a2, a0 ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret @@ -1166,20 +1129,20 @@ ; RV64IF-NEXT: sltu a4, a0, a3 ; RV64IF-NEXT: bnez a4, .LBB18_4 ; RV64IF-NEXT: .LBB18_3: # %entry -; RV64IF-NEXT: li a1, 0 ; RV64IF-NEXT: mv a0, a3 ; RV64IF-NEXT: .LBB18_4: # %entry -; RV64IF-NEXT: slli a3, a2, 63 -; RV64IF-NEXT: beq a1, a2, .LBB18_6 +; RV64IF-NEXT: and a3, a4, a1 +; RV64IF-NEXT: slli a1, a2, 63 +; RV64IF-NEXT: beq a3, a2, .LBB18_6 ; RV64IF-NEXT: # %bb.5: # %entry -; RV64IF-NEXT: slt a1, a2, a1 -; RV64IF-NEXT: beqz a1, .LBB18_7 +; RV64IF-NEXT: slt a2, a2, a3 +; RV64IF-NEXT: beqz a2, .LBB18_7 ; RV64IF-NEXT: j .LBB18_8 ; RV64IF-NEXT: .LBB18_6: -; RV64IF-NEXT: sltu a1, a3, a0 -; RV64IF-NEXT: bnez a1, .LBB18_8 +; RV64IF-NEXT: sltu a2, a1, a0 +; RV64IF-NEXT: bnez a2, .LBB18_8 ; RV64IF-NEXT: .LBB18_7: # %entry -; RV64IF-NEXT: mv a0, a3 +; RV64IF-NEXT: mv a0, a1 ; RV64IF-NEXT: .LBB18_8: # %entry ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 16 @@ -1214,32 +1177,31 @@ ; RV32IFD-NEXT: slti a7, a2, 0 ; RV32IFD-NEXT: .LBB18_4: # %entry ; RV32IFD-NEXT: li a6, -1 -; RV32IFD-NEXT: beqz a7, .LBB18_7 +; RV32IFD-NEXT: bnez a7, .LBB18_6 ; RV32IFD-NEXT: # %bb.5: # %entry -; RV32IFD-NEXT: beq a1, a4, .LBB18_8 +; RV32IFD-NEXT: li a0, -1 +; 
RV32IFD-NEXT: mv a1, a5 ; RV32IFD-NEXT: .LBB18_6: # %entry -; RV32IFD-NEXT: sltu a4, a4, a1 -; RV32IFD-NEXT: and a3, a3, a2 +; RV32IFD-NEXT: and a5, a7, a2 +; RV32IFD-NEXT: and a3, a7, a3 +; RV32IFD-NEXT: beq a1, a4, .LBB18_8 +; RV32IFD-NEXT: # %bb.7: # %entry +; RV32IFD-NEXT: sltu a2, a4, a1 +; RV32IFD-NEXT: and a3, a3, a5 ; RV32IFD-NEXT: bne a3, a6, .LBB18_9 ; RV32IFD-NEXT: j .LBB18_10 -; RV32IFD-NEXT: .LBB18_7: # %entry -; RV32IFD-NEXT: li a2, 0 -; RV32IFD-NEXT: li a3, 0 -; RV32IFD-NEXT: li a0, -1 -; RV32IFD-NEXT: mv a1, a5 -; RV32IFD-NEXT: bne a1, a4, .LBB18_6 ; RV32IFD-NEXT: .LBB18_8: -; RV32IFD-NEXT: snez a4, a0 -; RV32IFD-NEXT: and a3, a3, a2 +; RV32IFD-NEXT: snez a2, a0 +; RV32IFD-NEXT: and a3, a3, a5 ; RV32IFD-NEXT: beq a3, a6, .LBB18_10 ; RV32IFD-NEXT: .LBB18_9: # %entry -; RV32IFD-NEXT: slt a4, a6, a2 +; RV32IFD-NEXT: slt a2, a6, a5 ; RV32IFD-NEXT: .LBB18_10: # %entry -; RV32IFD-NEXT: bnez a4, .LBB18_12 +; RV32IFD-NEXT: bnez a2, .LBB18_12 ; RV32IFD-NEXT: # %bb.11: # %entry -; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: lui a1, 524288 ; RV32IFD-NEXT: .LBB18_12: # %entry +; RV32IFD-NEXT: and a0, a2, a0 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret @@ -1273,30 +1235,20 @@ ; RV32IF-NEXT: mv a1, a0 ; RV32IF-NEXT: addi a0, sp, 8 ; RV32IF-NEXT: call __fixunsdfti@plt -; RV32IF-NEXT: lw a0, 20(sp) -; RV32IF-NEXT: lw a1, 16(sp) -; RV32IF-NEXT: beqz a0, .LBB19_2 -; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: li a2, 0 -; RV32IF-NEXT: j .LBB19_3 -; RV32IF-NEXT: .LBB19_2: -; RV32IF-NEXT: seqz a2, a1 -; RV32IF-NEXT: .LBB19_3: # %entry -; RV32IF-NEXT: xori a1, a1, 1 -; RV32IF-NEXT: or a1, a1, a0 -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: beqz a1, .LBB19_5 -; RV32IF-NEXT: # %bb.4: # %entry -; RV32IF-NEXT: mv a0, a2 -; RV32IF-NEXT: .LBB19_5: # %entry -; RV32IF-NEXT: bnez a0, .LBB19_7 -; RV32IF-NEXT: # %bb.6: # %entry -; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: j .LBB19_8 -; RV32IF-NEXT: .LBB19_7: +; RV32IF-NEXT: lw a2, 16(sp) +; RV32IF-NEXT: lw a4, 20(sp) ; RV32IF-NEXT: lw a1, 12(sp) ; RV32IF-NEXT: lw a0, 8(sp) -; RV32IF-NEXT: .LBB19_8: # %entry +; RV32IF-NEXT: xori a3, a2, 1 +; RV32IF-NEXT: or a5, a3, a4 +; RV32IF-NEXT: li a3, 0 +; RV32IF-NEXT: beqz a5, .LBB19_2 +; RV32IF-NEXT: # %bb.1: # %entry +; RV32IF-NEXT: or a2, a4, a2 +; RV32IF-NEXT: seqz a3, a2 +; RV32IF-NEXT: .LBB19_2: # %entry +; RV32IF-NEXT: and a0, a3, a0 +; RV32IF-NEXT: and a1, a3, a1 ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret @@ -1308,10 +1260,8 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixunsdfti@plt -; RV64-NEXT: beqz a1, .LBB19_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB19_2: # %entry +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1324,30 +1274,20 @@ ; RV32IFD-NEXT: .cfi_offset ra, -4 ; RV32IFD-NEXT: addi a0, sp, 8 ; RV32IFD-NEXT: call __fixunsdfti@plt -; RV32IFD-NEXT: lw a0, 20(sp) -; RV32IFD-NEXT: lw a1, 16(sp) -; RV32IFD-NEXT: beqz a0, .LBB19_2 -; RV32IFD-NEXT: # %bb.1: # %entry -; RV32IFD-NEXT: li a2, 0 -; RV32IFD-NEXT: j .LBB19_3 -; RV32IFD-NEXT: .LBB19_2: -; RV32IFD-NEXT: seqz a2, a1 -; RV32IFD-NEXT: .LBB19_3: # %entry -; RV32IFD-NEXT: xori a1, a1, 1 -; RV32IFD-NEXT: or a1, a1, a0 -; RV32IFD-NEXT: li a0, 0 -; RV32IFD-NEXT: beqz a1, .LBB19_5 -; RV32IFD-NEXT: # %bb.4: # %entry -; RV32IFD-NEXT: mv a0, a2 -; RV32IFD-NEXT: 
.LBB19_5: # %entry -; RV32IFD-NEXT: bnez a0, .LBB19_7 -; RV32IFD-NEXT: # %bb.6: # %entry -; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: j .LBB19_8 -; RV32IFD-NEXT: .LBB19_7: +; RV32IFD-NEXT: lw a2, 16(sp) +; RV32IFD-NEXT: lw a4, 20(sp) ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: .LBB19_8: # %entry +; RV32IFD-NEXT: xori a3, a2, 1 +; RV32IFD-NEXT: or a5, a3, a4 +; RV32IFD-NEXT: li a3, 0 +; RV32IFD-NEXT: beqz a5, .LBB19_2 +; RV32IFD-NEXT: # %bb.1: # %entry +; RV32IFD-NEXT: or a2, a4, a2 +; RV32IFD-NEXT: seqz a3, a2 +; RV32IFD-NEXT: .LBB19_2: # %entry +; RV32IFD-NEXT: and a0, a3, a0 +; RV32IFD-NEXT: and a1, a3, a1 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret @@ -1370,55 +1310,51 @@ ; RV32IF-NEXT: mv a1, a0 ; RV32IF-NEXT: addi a0, sp, 8 ; RV32IF-NEXT: call __fixdfti@plt -; RV32IF-NEXT: lw a2, 20(sp) -; RV32IF-NEXT: lw a3, 16(sp) -; RV32IF-NEXT: beqz a2, .LBB20_2 +; RV32IF-NEXT: lw a1, 20(sp) +; RV32IF-NEXT: lw a0, 16(sp) +; RV32IF-NEXT: beqz a1, .LBB20_2 ; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: slti a0, a2, 0 +; RV32IF-NEXT: slti a3, a1, 0 ; RV32IF-NEXT: j .LBB20_3 ; RV32IF-NEXT: .LBB20_2: -; RV32IF-NEXT: seqz a0, a3 +; RV32IF-NEXT: seqz a3, a0 ; RV32IF-NEXT: .LBB20_3: # %entry -; RV32IF-NEXT: xori a1, a3, 1 -; RV32IF-NEXT: or a4, a1, a2 -; RV32IF-NEXT: li a1, 0 +; RV32IF-NEXT: xori a2, a0, 1 +; RV32IF-NEXT: or a4, a2, a1 +; RV32IF-NEXT: li a2, 0 ; RV32IF-NEXT: beqz a4, .LBB20_5 ; RV32IF-NEXT: # %bb.4: # %entry -; RV32IF-NEXT: mv a1, a0 +; RV32IF-NEXT: mv a2, a3 ; RV32IF-NEXT: .LBB20_5: # %entry -; RV32IF-NEXT: bnez a1, .LBB20_9 +; RV32IF-NEXT: lw a4, 12(sp) +; RV32IF-NEXT: bnez a2, .LBB20_7 ; RV32IF-NEXT: # %bb.6: # %entry -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: li a2, 0 -; RV32IF-NEXT: li a3, 1 -; RV32IF-NEXT: bnez a2, .LBB20_10 -; RV32IF-NEXT: .LBB20_7: -; RV32IF-NEXT: snez a4, a3 -; RV32IF-NEXT: bnez a1, .LBB20_11 -; RV32IF-NEXT: .LBB20_8: -; RV32IF-NEXT: snez a5, a0 -; RV32IF-NEXT: or a2, a3, a2 -; RV32IF-NEXT: bnez a2, .LBB20_12 -; RV32IF-NEXT: j .LBB20_13 +; RV32IF-NEXT: li a0, 1 +; RV32IF-NEXT: .LBB20_7: # %entry +; RV32IF-NEXT: lw a5, 8(sp) +; RV32IF-NEXT: and a3, a2, a1 +; RV32IF-NEXT: and a1, a2, a4 +; RV32IF-NEXT: beqz a3, .LBB20_9 +; RV32IF-NEXT: # %bb.8: # %entry +; RV32IF-NEXT: sgtz a4, a3 +; RV32IF-NEXT: j .LBB20_10 ; RV32IF-NEXT: .LBB20_9: -; RV32IF-NEXT: lw a1, 12(sp) -; RV32IF-NEXT: lw a0, 8(sp) -; RV32IF-NEXT: beqz a2, .LBB20_7 +; RV32IF-NEXT: snez a4, a0 ; RV32IF-NEXT: .LBB20_10: # %entry -; RV32IF-NEXT: sgtz a4, a2 -; RV32IF-NEXT: beqz a1, .LBB20_8 -; RV32IF-NEXT: .LBB20_11: # %entry -; RV32IF-NEXT: snez a5, a1 -; RV32IF-NEXT: or a2, a3, a2 -; RV32IF-NEXT: beqz a2, .LBB20_13 +; RV32IF-NEXT: and a2, a2, a5 +; RV32IF-NEXT: mv a5, a2 +; RV32IF-NEXT: beqz a1, .LBB20_12 +; RV32IF-NEXT: # %bb.11: # %entry +; RV32IF-NEXT: mv a5, a1 ; RV32IF-NEXT: .LBB20_12: # %entry +; RV32IF-NEXT: or a0, a0, a3 +; RV32IF-NEXT: beqz a0, .LBB20_14 +; RV32IF-NEXT: # %bb.13: # %entry ; RV32IF-NEXT: mv a5, a4 -; RV32IF-NEXT: .LBB20_13: # %entry -; RV32IF-NEXT: bnez a5, .LBB20_15 -; RV32IF-NEXT: # %bb.14: # %entry -; RV32IF-NEXT: li a0, 0 -; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: .LBB20_15: # %entry +; RV32IF-NEXT: .LBB20_14: # %entry +; RV32IF-NEXT: neg a3, a5 +; RV32IF-NEXT: and a0, a3, a2 +; RV32IF-NEXT: and a1, a3, a1 ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret @@ -1430,22 +1366,18 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: 
.cfi_offset ra, -8 ; RV64-NEXT: call __fixdfti@plt +; RV64-NEXT: slti a2, a1, 1 ; RV64-NEXT: blez a1, .LBB20_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: li a0, 0 ; RV64-NEXT: li a1, 1 ; RV64-NEXT: .LBB20_2: # %entry +; RV64-NEXT: and a0, a2, a0 +; RV64-NEXT: mv a2, a0 ; RV64-NEXT: beqz a1, .LBB20_4 ; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: sgtz a1, a1 -; RV64-NEXT: beqz a1, .LBB20_5 -; RV64-NEXT: j .LBB20_6 -; RV64-NEXT: .LBB20_4: -; RV64-NEXT: snez a1, a0 -; RV64-NEXT: bnez a1, .LBB20_6 -; RV64-NEXT: .LBB20_5: # %entry -; RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB20_6: # %entry +; RV64-NEXT: sgtz a2, a1 +; RV64-NEXT: .LBB20_4: # %entry +; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1458,55 +1390,51 @@ ; RV32IFD-NEXT: .cfi_offset ra, -4 ; RV32IFD-NEXT: addi a0, sp, 8 ; RV32IFD-NEXT: call __fixdfti@plt -; RV32IFD-NEXT: lw a2, 20(sp) -; RV32IFD-NEXT: lw a3, 16(sp) -; RV32IFD-NEXT: beqz a2, .LBB20_2 +; RV32IFD-NEXT: lw a1, 20(sp) +; RV32IFD-NEXT: lw a0, 16(sp) +; RV32IFD-NEXT: beqz a1, .LBB20_2 ; RV32IFD-NEXT: # %bb.1: # %entry -; RV32IFD-NEXT: slti a0, a2, 0 +; RV32IFD-NEXT: slti a3, a1, 0 ; RV32IFD-NEXT: j .LBB20_3 ; RV32IFD-NEXT: .LBB20_2: -; RV32IFD-NEXT: seqz a0, a3 +; RV32IFD-NEXT: seqz a3, a0 ; RV32IFD-NEXT: .LBB20_3: # %entry -; RV32IFD-NEXT: xori a1, a3, 1 -; RV32IFD-NEXT: or a4, a1, a2 -; RV32IFD-NEXT: li a1, 0 +; RV32IFD-NEXT: xori a2, a0, 1 +; RV32IFD-NEXT: or a4, a2, a1 +; RV32IFD-NEXT: li a2, 0 ; RV32IFD-NEXT: beqz a4, .LBB20_5 ; RV32IFD-NEXT: # %bb.4: # %entry -; RV32IFD-NEXT: mv a1, a0 +; RV32IFD-NEXT: mv a2, a3 ; RV32IFD-NEXT: .LBB20_5: # %entry -; RV32IFD-NEXT: bnez a1, .LBB20_9 +; RV32IFD-NEXT: lw a4, 12(sp) +; RV32IFD-NEXT: bnez a2, .LBB20_7 ; RV32IFD-NEXT: # %bb.6: # %entry -; RV32IFD-NEXT: li a0, 0 -; RV32IFD-NEXT: li a2, 0 -; RV32IFD-NEXT: li a3, 1 -; RV32IFD-NEXT: bnez a2, .LBB20_10 -; RV32IFD-NEXT: .LBB20_7: -; RV32IFD-NEXT: snez a4, a3 -; RV32IFD-NEXT: bnez a1, .LBB20_11 -; RV32IFD-NEXT: .LBB20_8: -; RV32IFD-NEXT: snez a5, a0 -; RV32IFD-NEXT: or a2, a3, a2 -; RV32IFD-NEXT: bnez a2, .LBB20_12 -; RV32IFD-NEXT: j .LBB20_13 +; RV32IFD-NEXT: li a0, 1 +; RV32IFD-NEXT: .LBB20_7: # %entry +; RV32IFD-NEXT: lw a5, 8(sp) +; RV32IFD-NEXT: and a3, a2, a1 +; RV32IFD-NEXT: and a1, a2, a4 +; RV32IFD-NEXT: beqz a3, .LBB20_9 +; RV32IFD-NEXT: # %bb.8: # %entry +; RV32IFD-NEXT: sgtz a4, a3 +; RV32IFD-NEXT: j .LBB20_10 ; RV32IFD-NEXT: .LBB20_9: -; RV32IFD-NEXT: lw a1, 12(sp) -; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: beqz a2, .LBB20_7 +; RV32IFD-NEXT: snez a4, a0 ; RV32IFD-NEXT: .LBB20_10: # %entry -; RV32IFD-NEXT: sgtz a4, a2 -; RV32IFD-NEXT: beqz a1, .LBB20_8 -; RV32IFD-NEXT: .LBB20_11: # %entry -; RV32IFD-NEXT: snez a5, a1 -; RV32IFD-NEXT: or a2, a3, a2 -; RV32IFD-NEXT: beqz a2, .LBB20_13 +; RV32IFD-NEXT: and a2, a2, a5 +; RV32IFD-NEXT: mv a5, a2 +; RV32IFD-NEXT: beqz a1, .LBB20_12 +; RV32IFD-NEXT: # %bb.11: # %entry +; RV32IFD-NEXT: mv a5, a1 ; RV32IFD-NEXT: .LBB20_12: # %entry +; RV32IFD-NEXT: or a0, a0, a3 +; RV32IFD-NEXT: beqz a0, .LBB20_14 +; RV32IFD-NEXT: # %bb.13: # %entry ; RV32IFD-NEXT: mv a5, a4 -; RV32IFD-NEXT: .LBB20_13: # %entry -; RV32IFD-NEXT: bnez a5, .LBB20_15 -; RV32IFD-NEXT: # %bb.14: # %entry -; RV32IFD-NEXT: li a0, 0 -; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: .LBB20_15: # %entry +; RV32IFD-NEXT: .LBB20_14: # %entry +; RV32IFD-NEXT: neg a3, a5 +; RV32IFD-NEXT: and a0, a3, a2 +; RV32IFD-NEXT: and a1, a3, a1 ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi 
sp, sp, 32 ; RV32IFD-NEXT: ret @@ -1550,32 +1478,31 @@ ; RV32-NEXT: slti a7, a2, 0 ; RV32-NEXT: .LBB21_4: # %entry ; RV32-NEXT: li a6, -1 -; RV32-NEXT: beqz a7, .LBB21_7 +; RV32-NEXT: bnez a7, .LBB21_6 ; RV32-NEXT: # %bb.5: # %entry -; RV32-NEXT: beq a1, a4, .LBB21_8 +; RV32-NEXT: li a0, -1 +; RV32-NEXT: mv a1, a5 ; RV32-NEXT: .LBB21_6: # %entry -; RV32-NEXT: sltu a4, a4, a1 -; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: and a5, a7, a2 +; RV32-NEXT: and a3, a7, a3 +; RV32-NEXT: beq a1, a4, .LBB21_8 +; RV32-NEXT: # %bb.7: # %entry +; RV32-NEXT: sltu a2, a4, a1 +; RV32-NEXT: and a3, a3, a5 ; RV32-NEXT: bne a3, a6, .LBB21_9 ; RV32-NEXT: j .LBB21_10 -; RV32-NEXT: .LBB21_7: # %entry -; RV32-NEXT: li a2, 0 -; RV32-NEXT: li a3, 0 -; RV32-NEXT: li a0, -1 -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: bne a1, a4, .LBB21_6 ; RV32-NEXT: .LBB21_8: -; RV32-NEXT: snez a4, a0 -; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: snez a2, a0 +; RV32-NEXT: and a3, a3, a5 ; RV32-NEXT: beq a3, a6, .LBB21_10 ; RV32-NEXT: .LBB21_9: # %entry -; RV32-NEXT: slt a4, a6, a2 +; RV32-NEXT: slt a2, a6, a5 ; RV32-NEXT: .LBB21_10: # %entry -; RV32-NEXT: bnez a4, .LBB21_12 +; RV32-NEXT: bnez a2, .LBB21_12 ; RV32-NEXT: # %bb.11: # %entry -; RV32-NEXT: li a0, 0 ; RV32-NEXT: lui a1, 524288 ; RV32-NEXT: .LBB21_12: # %entry +; RV32-NEXT: and a0, a2, a0 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1607,30 +1534,20 @@ ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixunssfti@plt -; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a1, 16(sp) -; RV32-NEXT: beqz a0, .LBB22_2 -; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: li a2, 0 -; RV32-NEXT: j .LBB22_3 -; RV32-NEXT: .LBB22_2: -; RV32-NEXT: seqz a2, a1 -; RV32-NEXT: .LBB22_3: # %entry -; RV32-NEXT: xori a1, a1, 1 -; RV32-NEXT: or a1, a1, a0 -; RV32-NEXT: li a0, 0 -; RV32-NEXT: beqz a1, .LBB22_5 -; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a0, a2 -; RV32-NEXT: .LBB22_5: # %entry -; RV32-NEXT: bnez a0, .LBB22_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: li a1, 0 -; RV32-NEXT: j .LBB22_8 -; RV32-NEXT: .LBB22_7: +; RV32-NEXT: lw a2, 16(sp) +; RV32-NEXT: lw a4, 20(sp) ; RV32-NEXT: lw a1, 12(sp) ; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: .LBB22_8: # %entry +; RV32-NEXT: xori a3, a2, 1 +; RV32-NEXT: or a5, a3, a4 +; RV32-NEXT: li a3, 0 +; RV32-NEXT: beqz a5, .LBB22_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: or a2, a4, a2 +; RV32-NEXT: seqz a3, a2 +; RV32-NEXT: .LBB22_2: # %entry +; RV32-NEXT: and a0, a3, a0 +; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1642,10 +1559,8 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixunssfti@plt -; RV64-NEXT: beqz a1, .LBB22_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB22_2: # %entry +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1666,55 +1581,51 @@ ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixsfti@plt -; RV32-NEXT: lw a2, 20(sp) -; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: beqz a2, .LBB23_2 +; RV32-NEXT: lw a1, 20(sp) +; RV32-NEXT: lw a0, 16(sp) +; RV32-NEXT: beqz a1, .LBB23_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: slti a0, a2, 0 +; RV32-NEXT: slti a3, a1, 0 ; RV32-NEXT: j .LBB23_3 ; RV32-NEXT: .LBB23_2: -; RV32-NEXT: seqz a0, a3 +; RV32-NEXT: seqz a3, a0 ; RV32-NEXT: .LBB23_3: 
# %entry -; RV32-NEXT: xori a1, a3, 1 -; RV32-NEXT: or a4, a1, a2 -; RV32-NEXT: li a1, 0 +; RV32-NEXT: xori a2, a0, 1 +; RV32-NEXT: or a4, a2, a1 +; RV32-NEXT: li a2, 0 ; RV32-NEXT: beqz a4, .LBB23_5 ; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a1, a0 +; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB23_5: # %entry -; RV32-NEXT: bnez a1, .LBB23_9 +; RV32-NEXT: lw a4, 12(sp) +; RV32-NEXT: bnez a2, .LBB23_7 ; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: li a0, 0 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: li a3, 1 -; RV32-NEXT: bnez a2, .LBB23_10 -; RV32-NEXT: .LBB23_7: -; RV32-NEXT: snez a4, a3 -; RV32-NEXT: bnez a1, .LBB23_11 -; RV32-NEXT: .LBB23_8: -; RV32-NEXT: snez a5, a0 -; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB23_12 -; RV32-NEXT: j .LBB23_13 +; RV32-NEXT: li a0, 1 +; RV32-NEXT: .LBB23_7: # %entry +; RV32-NEXT: lw a5, 8(sp) +; RV32-NEXT: and a3, a2, a1 +; RV32-NEXT: and a1, a2, a4 +; RV32-NEXT: beqz a3, .LBB23_9 +; RV32-NEXT: # %bb.8: # %entry +; RV32-NEXT: sgtz a4, a3 +; RV32-NEXT: j .LBB23_10 ; RV32-NEXT: .LBB23_9: -; RV32-NEXT: lw a1, 12(sp) -; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: beqz a2, .LBB23_7 +; RV32-NEXT: snez a4, a0 ; RV32-NEXT: .LBB23_10: # %entry -; RV32-NEXT: sgtz a4, a2 -; RV32-NEXT: beqz a1, .LBB23_8 -; RV32-NEXT: .LBB23_11: # %entry -; RV32-NEXT: snez a5, a1 -; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: beqz a2, .LBB23_13 +; RV32-NEXT: and a2, a2, a5 +; RV32-NEXT: mv a5, a2 +; RV32-NEXT: beqz a1, .LBB23_12 +; RV32-NEXT: # %bb.11: # %entry +; RV32-NEXT: mv a5, a1 ; RV32-NEXT: .LBB23_12: # %entry +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: beqz a0, .LBB23_14 +; RV32-NEXT: # %bb.13: # %entry ; RV32-NEXT: mv a5, a4 -; RV32-NEXT: .LBB23_13: # %entry -; RV32-NEXT: bnez a5, .LBB23_15 -; RV32-NEXT: # %bb.14: # %entry -; RV32-NEXT: li a0, 0 -; RV32-NEXT: li a1, 0 -; RV32-NEXT: .LBB23_15: # %entry +; RV32-NEXT: .LBB23_14: # %entry +; RV32-NEXT: neg a3, a5 +; RV32-NEXT: and a0, a3, a2 +; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1726,22 +1637,18 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixsfti@plt +; RV64-NEXT: slti a2, a1, 1 ; RV64-NEXT: blez a1, .LBB23_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: li a0, 0 ; RV64-NEXT: li a1, 1 ; RV64-NEXT: .LBB23_2: # %entry +; RV64-NEXT: and a0, a2, a0 +; RV64-NEXT: mv a2, a0 ; RV64-NEXT: beqz a1, .LBB23_4 ; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: sgtz a1, a1 -; RV64-NEXT: beqz a1, .LBB23_5 -; RV64-NEXT: j .LBB23_6 -; RV64-NEXT: .LBB23_4: -; RV64-NEXT: snez a1, a0 -; RV64-NEXT: bnez a1, .LBB23_6 -; RV64-NEXT: .LBB23_5: # %entry -; RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB23_6: # %entry +; RV64-NEXT: sgtz a2, a1 +; RV64-NEXT: .LBB23_4: # %entry +; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1787,32 +1694,31 @@ ; RV32-NEXT: slti a7, a2, 0 ; RV32-NEXT: .LBB24_4: # %entry ; RV32-NEXT: li a6, -1 -; RV32-NEXT: beqz a7, .LBB24_7 +; RV32-NEXT: bnez a7, .LBB24_6 ; RV32-NEXT: # %bb.5: # %entry -; RV32-NEXT: beq a1, a4, .LBB24_8 +; RV32-NEXT: li a0, -1 +; RV32-NEXT: mv a1, a5 ; RV32-NEXT: .LBB24_6: # %entry -; RV32-NEXT: sltu a4, a4, a1 -; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: and a5, a7, a2 +; RV32-NEXT: and a3, a7, a3 +; RV32-NEXT: beq a1, a4, .LBB24_8 +; RV32-NEXT: # %bb.7: # %entry +; RV32-NEXT: sltu a2, a4, a1 +; RV32-NEXT: and a3, a3, a5 ; RV32-NEXT: bne a3, a6, .LBB24_9 ; RV32-NEXT: j .LBB24_10 -; RV32-NEXT: .LBB24_7: # %entry 
-; RV32-NEXT: li a2, 0 -; RV32-NEXT: li a3, 0 -; RV32-NEXT: li a0, -1 -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: bne a1, a4, .LBB24_6 ; RV32-NEXT: .LBB24_8: -; RV32-NEXT: snez a4, a0 -; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: snez a2, a0 +; RV32-NEXT: and a3, a3, a5 ; RV32-NEXT: beq a3, a6, .LBB24_10 ; RV32-NEXT: .LBB24_9: # %entry -; RV32-NEXT: slt a4, a6, a2 +; RV32-NEXT: slt a2, a6, a5 ; RV32-NEXT: .LBB24_10: # %entry -; RV32-NEXT: bnez a4, .LBB24_12 +; RV32-NEXT: bnez a2, .LBB24_12 ; RV32-NEXT: # %bb.11: # %entry -; RV32-NEXT: li a0, 0 ; RV32-NEXT: lui a1, 524288 ; RV32-NEXT: .LBB24_12: # %entry +; RV32-NEXT: and a0, a2, a0 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1837,20 +1743,20 @@ ; RV64-NEXT: sltu a4, a0, a3 ; RV64-NEXT: bnez a4, .LBB24_4 ; RV64-NEXT: .LBB24_3: # %entry -; RV64-NEXT: li a1, 0 ; RV64-NEXT: mv a0, a3 ; RV64-NEXT: .LBB24_4: # %entry -; RV64-NEXT: slli a3, a2, 63 -; RV64-NEXT: beq a1, a2, .LBB24_6 +; RV64-NEXT: and a3, a4, a1 +; RV64-NEXT: slli a1, a2, 63 +; RV64-NEXT: beq a3, a2, .LBB24_6 ; RV64-NEXT: # %bb.5: # %entry -; RV64-NEXT: slt a1, a2, a1 -; RV64-NEXT: beqz a1, .LBB24_7 +; RV64-NEXT: slt a2, a2, a3 +; RV64-NEXT: beqz a2, .LBB24_7 ; RV64-NEXT: j .LBB24_8 ; RV64-NEXT: .LBB24_6: -; RV64-NEXT: sltu a1, a3, a0 -; RV64-NEXT: bnez a1, .LBB24_8 +; RV64-NEXT: sltu a2, a1, a0 +; RV64-NEXT: bnez a2, .LBB24_8 ; RV64-NEXT: .LBB24_7: # %entry -; RV64-NEXT: mv a0, a3 +; RV64-NEXT: mv a0, a1 ; RV64-NEXT: .LBB24_8: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 @@ -1876,30 +1782,20 @@ ; RV32-NEXT: call __extendhfsf2@plt ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixunssfti@plt -; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a1, 16(sp) -; RV32-NEXT: beqz a0, .LBB25_2 -; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: li a2, 0 -; RV32-NEXT: j .LBB25_3 -; RV32-NEXT: .LBB25_2: -; RV32-NEXT: seqz a2, a1 -; RV32-NEXT: .LBB25_3: # %entry -; RV32-NEXT: xori a1, a1, 1 -; RV32-NEXT: or a1, a1, a0 -; RV32-NEXT: li a0, 0 -; RV32-NEXT: beqz a1, .LBB25_5 -; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a0, a2 -; RV32-NEXT: .LBB25_5: # %entry -; RV32-NEXT: bnez a0, .LBB25_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: li a1, 0 -; RV32-NEXT: j .LBB25_8 -; RV32-NEXT: .LBB25_7: +; RV32-NEXT: lw a2, 16(sp) +; RV32-NEXT: lw a4, 20(sp) ; RV32-NEXT: lw a1, 12(sp) ; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: .LBB25_8: # %entry +; RV32-NEXT: xori a3, a2, 1 +; RV32-NEXT: or a5, a3, a4 +; RV32-NEXT: li a3, 0 +; RV32-NEXT: beqz a5, .LBB25_2 +; RV32-NEXT: # %bb.1: # %entry +; RV32-NEXT: or a2, a4, a2 +; RV32-NEXT: seqz a3, a2 +; RV32-NEXT: .LBB25_2: # %entry +; RV32-NEXT: and a0, a3, a0 +; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -1913,10 +1809,8 @@ ; RV64-NEXT: fmv.x.w a0, fa0 ; RV64-NEXT: call __extendhfsf2@plt ; RV64-NEXT: call __fixunssfti@plt -; RV64-NEXT: beqz a1, .LBB25_2 -; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB25_2: # %entry +; RV64-NEXT: seqz a1, a1 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -1939,55 +1833,51 @@ ; RV32-NEXT: call __extendhfsf2@plt ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixsfti@plt -; RV32-NEXT: lw a2, 20(sp) -; RV32-NEXT: lw a3, 16(sp) -; RV32-NEXT: beqz a2, .LBB26_2 +; RV32-NEXT: lw a1, 20(sp) +; RV32-NEXT: lw a0, 16(sp) +; RV32-NEXT: beqz a1, .LBB26_2 ; RV32-NEXT: # %bb.1: # %entry -; 
RV32-NEXT: slti a0, a2, 0 +; RV32-NEXT: slti a3, a1, 0 ; RV32-NEXT: j .LBB26_3 ; RV32-NEXT: .LBB26_2: -; RV32-NEXT: seqz a0, a3 +; RV32-NEXT: seqz a3, a0 ; RV32-NEXT: .LBB26_3: # %entry -; RV32-NEXT: xori a1, a3, 1 -; RV32-NEXT: or a4, a1, a2 -; RV32-NEXT: li a1, 0 +; RV32-NEXT: xori a2, a0, 1 +; RV32-NEXT: or a4, a2, a1 +; RV32-NEXT: li a2, 0 ; RV32-NEXT: beqz a4, .LBB26_5 ; RV32-NEXT: # %bb.4: # %entry -; RV32-NEXT: mv a1, a0 +; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB26_5: # %entry -; RV32-NEXT: bnez a1, .LBB26_9 +; RV32-NEXT: lw a4, 12(sp) +; RV32-NEXT: bnez a2, .LBB26_7 ; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: li a0, 0 -; RV32-NEXT: li a2, 0 -; RV32-NEXT: li a3, 1 -; RV32-NEXT: bnez a2, .LBB26_10 -; RV32-NEXT: .LBB26_7: -; RV32-NEXT: snez a4, a3 -; RV32-NEXT: bnez a1, .LBB26_11 -; RV32-NEXT: .LBB26_8: -; RV32-NEXT: snez a5, a0 -; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB26_12 -; RV32-NEXT: j .LBB26_13 +; RV32-NEXT: li a0, 1 +; RV32-NEXT: .LBB26_7: # %entry +; RV32-NEXT: lw a5, 8(sp) +; RV32-NEXT: and a3, a2, a1 +; RV32-NEXT: and a1, a2, a4 +; RV32-NEXT: beqz a3, .LBB26_9 +; RV32-NEXT: # %bb.8: # %entry +; RV32-NEXT: sgtz a4, a3 +; RV32-NEXT: j .LBB26_10 ; RV32-NEXT: .LBB26_9: -; RV32-NEXT: lw a1, 12(sp) -; RV32-NEXT: lw a0, 8(sp) -; RV32-NEXT: beqz a2, .LBB26_7 +; RV32-NEXT: snez a4, a0 ; RV32-NEXT: .LBB26_10: # %entry -; RV32-NEXT: sgtz a4, a2 -; RV32-NEXT: beqz a1, .LBB26_8 -; RV32-NEXT: .LBB26_11: # %entry -; RV32-NEXT: snez a5, a1 -; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: beqz a2, .LBB26_13 +; RV32-NEXT: and a2, a2, a5 +; RV32-NEXT: mv a5, a2 +; RV32-NEXT: beqz a1, .LBB26_12 +; RV32-NEXT: # %bb.11: # %entry +; RV32-NEXT: mv a5, a1 ; RV32-NEXT: .LBB26_12: # %entry +; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: beqz a0, .LBB26_14 +; RV32-NEXT: # %bb.13: # %entry ; RV32-NEXT: mv a5, a4 -; RV32-NEXT: .LBB26_13: # %entry -; RV32-NEXT: bnez a5, .LBB26_15 -; RV32-NEXT: # %bb.14: # %entry -; RV32-NEXT: li a0, 0 -; RV32-NEXT: li a1, 0 -; RV32-NEXT: .LBB26_15: # %entry +; RV32-NEXT: .LBB26_14: # %entry +; RV32-NEXT: neg a3, a5 +; RV32-NEXT: and a0, a3, a2 +; RV32-NEXT: and a1, a3, a1 ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -2001,22 +1891,18 @@ ; RV64-NEXT: fmv.x.w a0, fa0 ; RV64-NEXT: call __extendhfsf2@plt ; RV64-NEXT: call __fixsfti@plt +; RV64-NEXT: slti a2, a1, 1 ; RV64-NEXT: blez a1, .LBB26_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: li a0, 0 ; RV64-NEXT: li a1, 1 ; RV64-NEXT: .LBB26_2: # %entry +; RV64-NEXT: and a0, a2, a0 +; RV64-NEXT: mv a2, a0 ; RV64-NEXT: beqz a1, .LBB26_4 ; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: sgtz a1, a1 -; RV64-NEXT: beqz a1, .LBB26_5 -; RV64-NEXT: j .LBB26_6 -; RV64-NEXT: .LBB26_4: -; RV64-NEXT: snez a1, a0 -; RV64-NEXT: bnez a1, .LBB26_6 -; RV64-NEXT: .LBB26_5: # %entry -; RV64-NEXT: li a0, 0 -; RV64-NEXT: .LBB26_6: # %entry +; RV64-NEXT: sgtz a2, a1 +; RV64-NEXT: .LBB26_4: # %entry +; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -2211,36 +2097,30 @@ ; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixdfdi@plt ; RV32IF-NEXT: mv a2, a0 -; RV32IF-NEXT: bgez a1, .LBB29_7 +; RV32IF-NEXT: bgez a1, .LBB29_6 ; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: bnez a1, .LBB29_8 +; RV32IF-NEXT: bnez a1, .LBB29_7 ; RV32IF-NEXT: .LBB29_2: # %entry -; RV32IF-NEXT: bgez a1, .LBB29_9 +; RV32IF-NEXT: bgez a1, .LBB29_8 ; RV32IF-NEXT: .LBB29_3: # %entry -; RV32IF-NEXT: mv a2, a0 -; RV32IF-NEXT: blez a1, .LBB29_10 +; 
RV32IF-NEXT: beqz a1, .LBB29_5 ; RV32IF-NEXT: .LBB29_4: # %entry -; RV32IF-NEXT: beqz a1, .LBB29_6 +; RV32IF-NEXT: sgtz a1, a1 +; RV32IF-NEXT: and a0, a1, a0 ; RV32IF-NEXT: .LBB29_5: # %entry -; RV32IF-NEXT: mv a0, a2 -; RV32IF-NEXT: .LBB29_6: # %entry ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret -; RV32IF-NEXT: .LBB29_7: # %entry +; RV32IF-NEXT: .LBB29_6: # %entry ; RV32IF-NEXT: li a2, -1 ; RV32IF-NEXT: beqz a1, .LBB29_2 -; RV32IF-NEXT: .LBB29_8: # %entry +; RV32IF-NEXT: .LBB29_7: # %entry ; RV32IF-NEXT: mv a0, a2 ; RV32IF-NEXT: bltz a1, .LBB29_3 -; RV32IF-NEXT: .LBB29_9: # %entry +; RV32IF-NEXT: .LBB29_8: # %entry ; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: mv a2, a0 -; RV32IF-NEXT: bgtz a1, .LBB29_4 -; RV32IF-NEXT: .LBB29_10: # %entry -; RV32IF-NEXT: li a2, 0 -; RV32IF-NEXT: bnez a1, .LBB29_5 -; RV32IF-NEXT: j .LBB29_6 +; RV32IF-NEXT: bnez a1, .LBB29_4 +; RV32IF-NEXT: j .LBB29_5 ; ; RV64IF-LABEL: ustest_f64i32_mm: ; RV64IF: # %bb.0: # %entry @@ -2531,36 +2411,30 @@ ; RV32-NEXT: call __extendhfsf2@plt ; RV32-NEXT: call __fixsfdi@plt ; RV32-NEXT: mv a2, a0 -; RV32-NEXT: bgez a1, .LBB35_7 +; RV32-NEXT: bgez a1, .LBB35_6 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: bnez a1, .LBB35_8 +; RV32-NEXT: bnez a1, .LBB35_7 ; RV32-NEXT: .LBB35_2: # %entry -; RV32-NEXT: bgez a1, .LBB35_9 +; RV32-NEXT: bgez a1, .LBB35_8 ; RV32-NEXT: .LBB35_3: # %entry -; RV32-NEXT: mv a2, a0 -; RV32-NEXT: blez a1, .LBB35_10 +; RV32-NEXT: beqz a1, .LBB35_5 ; RV32-NEXT: .LBB35_4: # %entry -; RV32-NEXT: beqz a1, .LBB35_6 +; RV32-NEXT: sgtz a1, a1 +; RV32-NEXT: and a0, a1, a0 ; RV32-NEXT: .LBB35_5: # %entry -; RV32-NEXT: mv a0, a2 -; RV32-NEXT: .LBB35_6: # %entry ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret -; RV32-NEXT: .LBB35_7: # %entry +; RV32-NEXT: .LBB35_6: # %entry ; RV32-NEXT: li a2, -1 ; RV32-NEXT: beqz a1, .LBB35_2 -; RV32-NEXT: .LBB35_8: # %entry +; RV32-NEXT: .LBB35_7: # %entry ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: bltz a1, .LBB35_3 -; RV32-NEXT: .LBB35_9: # %entry +; RV32-NEXT: .LBB35_8: # %entry ; RV32-NEXT: li a1, 0 -; RV32-NEXT: mv a2, a0 -; RV32-NEXT: bgtz a1, .LBB35_4 -; RV32-NEXT: .LBB35_10: # %entry -; RV32-NEXT: li a2, 0 -; RV32-NEXT: bnez a1, .LBB35_5 -; RV32-NEXT: j .LBB35_6 +; RV32-NEXT: bnez a1, .LBB35_4 +; RV32-NEXT: j .LBB35_5 ; ; RV64-LABEL: ustest_f16i32_mm: ; RV64: # %bb.0: # %entry @@ -3129,101 +3003,91 @@ ; RV32IF-NEXT: lui a4, 524288 ; RV32IF-NEXT: addi a6, a4, -1 ; RV32IF-NEXT: mv t0, a5 -; RV32IF-NEXT: bgeu a1, a6, .LBB45_19 +; RV32IF-NEXT: bgeu a1, a6, .LBB45_17 ; RV32IF-NEXT: # %bb.3: # %entry ; RV32IF-NEXT: lw a0, 16(sp) -; RV32IF-NEXT: bne a1, a6, .LBB45_20 +; RV32IF-NEXT: bne a1, a6, .LBB45_18 ; RV32IF-NEXT: .LBB45_4: # %entry ; RV32IF-NEXT: or t0, a0, a3 -; RV32IF-NEXT: bnez t0, .LBB45_21 +; RV32IF-NEXT: bnez t0, .LBB45_19 ; RV32IF-NEXT: .LBB45_5: # %entry ; RV32IF-NEXT: mv a7, a1 -; RV32IF-NEXT: bgez a3, .LBB45_22 +; RV32IF-NEXT: bgez a3, .LBB45_20 ; RV32IF-NEXT: .LBB45_6: # %entry -; RV32IF-NEXT: bgeu a1, a6, .LBB45_23 +; RV32IF-NEXT: bgeu a1, a6, .LBB45_21 ; RV32IF-NEXT: .LBB45_7: # %entry -; RV32IF-NEXT: bnez t0, .LBB45_24 +; RV32IF-NEXT: bnez t0, .LBB45_22 ; RV32IF-NEXT: .LBB45_8: # %entry ; RV32IF-NEXT: li a6, 0 -; RV32IF-NEXT: bnez a3, .LBB45_25 +; RV32IF-NEXT: bnez a3, .LBB45_23 ; RV32IF-NEXT: .LBB45_9: # %entry -; RV32IF-NEXT: bgez a3, .LBB45_26 +; RV32IF-NEXT: bgez a3, .LBB45_24 ; RV32IF-NEXT: .LBB45_10: # %entry -; RV32IF-NEXT: mv a7, a5 -; RV32IF-NEXT: bgeu a4, a1, 
.LBB45_27 -; RV32IF-NEXT: .LBB45_11: # %entry ; RV32IF-NEXT: mv a0, a5 -; RV32IF-NEXT: bne a1, a4, .LBB45_28 +; RV32IF-NEXT: bne a1, a4, .LBB45_25 +; RV32IF-NEXT: .LBB45_11: # %entry +; RV32IF-NEXT: and a6, a6, a3 +; RV32IF-NEXT: bne a6, a2, .LBB45_26 ; RV32IF-NEXT: .LBB45_12: # %entry -; RV32IF-NEXT: bltz a3, .LBB45_29 +; RV32IF-NEXT: mv a5, a1 +; RV32IF-NEXT: bltz a3, .LBB45_27 ; RV32IF-NEXT: .LBB45_13: # %entry -; RV32IF-NEXT: and a6, a6, a3 -; RV32IF-NEXT: bne a6, a2, .LBB45_30 +; RV32IF-NEXT: bgeu a4, a1, .LBB45_28 ; RV32IF-NEXT: .LBB45_14: # %entry -; RV32IF-NEXT: mv a5, a1 -; RV32IF-NEXT: bltz a3, .LBB45_31 +; RV32IF-NEXT: beq a6, a2, .LBB45_16 ; RV32IF-NEXT: .LBB45_15: # %entry -; RV32IF-NEXT: bgeu a4, a1, .LBB45_32 -; RV32IF-NEXT: .LBB45_16: # %entry -; RV32IF-NEXT: beq a6, a2, .LBB45_18 -; RV32IF-NEXT: .LBB45_17: # %entry ; RV32IF-NEXT: mv a1, a5 -; RV32IF-NEXT: .LBB45_18: # %entry +; RV32IF-NEXT: .LBB45_16: # %entry ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret -; RV32IF-NEXT: .LBB45_19: # %entry +; RV32IF-NEXT: .LBB45_17: # %entry ; RV32IF-NEXT: li t0, -1 ; RV32IF-NEXT: lw a0, 16(sp) ; RV32IF-NEXT: beq a1, a6, .LBB45_4 -; RV32IF-NEXT: .LBB45_20: # %entry +; RV32IF-NEXT: .LBB45_18: # %entry ; RV32IF-NEXT: mv a5, t0 ; RV32IF-NEXT: or t0, a0, a3 ; RV32IF-NEXT: beqz t0, .LBB45_5 -; RV32IF-NEXT: .LBB45_21: # %entry +; RV32IF-NEXT: .LBB45_19: # %entry ; RV32IF-NEXT: mv a5, a7 ; RV32IF-NEXT: mv a7, a1 ; RV32IF-NEXT: bltz a3, .LBB45_6 -; RV32IF-NEXT: .LBB45_22: # %entry +; RV32IF-NEXT: .LBB45_20: # %entry ; RV32IF-NEXT: mv a7, a6 ; RV32IF-NEXT: bltu a1, a6, .LBB45_7 -; RV32IF-NEXT: .LBB45_23: # %entry +; RV32IF-NEXT: .LBB45_21: # %entry ; RV32IF-NEXT: mv a1, a6 ; RV32IF-NEXT: beqz t0, .LBB45_8 -; RV32IF-NEXT: .LBB45_24: # %entry +; RV32IF-NEXT: .LBB45_22: # %entry ; RV32IF-NEXT: mv a1, a7 ; RV32IF-NEXT: li a6, 0 ; RV32IF-NEXT: beqz a3, .LBB45_9 -; RV32IF-NEXT: .LBB45_25: # %entry +; RV32IF-NEXT: .LBB45_23: # %entry ; RV32IF-NEXT: srai a6, a3, 31 ; RV32IF-NEXT: and a6, a6, a0 ; RV32IF-NEXT: bltz a3, .LBB45_10 -; RV32IF-NEXT: .LBB45_26: # %entry +; RV32IF-NEXT: .LBB45_24: # %entry ; RV32IF-NEXT: li a3, 0 -; RV32IF-NEXT: mv a7, a5 -; RV32IF-NEXT: bltu a4, a1, .LBB45_11 -; RV32IF-NEXT: .LBB45_27: # %entry -; RV32IF-NEXT: li a7, 0 ; RV32IF-NEXT: mv a0, a5 -; RV32IF-NEXT: beq a1, a4, .LBB45_12 -; RV32IF-NEXT: .LBB45_28: # %entry -; RV32IF-NEXT: mv a0, a7 -; RV32IF-NEXT: bgez a3, .LBB45_13 -; RV32IF-NEXT: .LBB45_29: # %entry -; RV32IF-NEXT: li a5, 0 +; RV32IF-NEXT: beq a1, a4, .LBB45_11 +; RV32IF-NEXT: .LBB45_25: # %entry +; RV32IF-NEXT: sltu a0, a4, a1 +; RV32IF-NEXT: and a0, a0, a5 ; RV32IF-NEXT: and a6, a6, a3 -; RV32IF-NEXT: beq a6, a2, .LBB45_14 -; RV32IF-NEXT: .LBB45_30: # %entry -; RV32IF-NEXT: mv a0, a5 +; RV32IF-NEXT: beq a6, a2, .LBB45_12 +; RV32IF-NEXT: .LBB45_26: # %entry +; RV32IF-NEXT: slt a0, a2, a3 +; RV32IF-NEXT: and a0, a0, a5 ; RV32IF-NEXT: mv a5, a1 -; RV32IF-NEXT: bgez a3, .LBB45_15 -; RV32IF-NEXT: .LBB45_31: # %entry +; RV32IF-NEXT: bgez a3, .LBB45_13 +; RV32IF-NEXT: .LBB45_27: # %entry ; RV32IF-NEXT: lui a5, 524288 -; RV32IF-NEXT: bltu a4, a1, .LBB45_16 -; RV32IF-NEXT: .LBB45_32: # %entry +; RV32IF-NEXT: bltu a4, a1, .LBB45_14 +; RV32IF-NEXT: .LBB45_28: # %entry ; RV32IF-NEXT: lui a1, 524288 -; RV32IF-NEXT: bne a6, a2, .LBB45_17 -; RV32IF-NEXT: j .LBB45_18 +; RV32IF-NEXT: bne a6, a2, .LBB45_15 +; RV32IF-NEXT: j .LBB45_16 ; ; RV64IF-LABEL: stest_f64i64_mm: ; RV64IF: # %bb.0: # %entry @@ -3296,101 +3160,91 @@ ; 
RV32IFD-NEXT: lui a4, 524288 ; RV32IFD-NEXT: addi a6, a4, -1 ; RV32IFD-NEXT: mv t0, a5 -; RV32IFD-NEXT: bgeu a1, a6, .LBB45_19 +; RV32IFD-NEXT: bgeu a1, a6, .LBB45_17 ; RV32IFD-NEXT: # %bb.3: # %entry ; RV32IFD-NEXT: lw a0, 16(sp) -; RV32IFD-NEXT: bne a1, a6, .LBB45_20 +; RV32IFD-NEXT: bne a1, a6, .LBB45_18 ; RV32IFD-NEXT: .LBB45_4: # %entry ; RV32IFD-NEXT: or t0, a0, a3 -; RV32IFD-NEXT: bnez t0, .LBB45_21 +; RV32IFD-NEXT: bnez t0, .LBB45_19 ; RV32IFD-NEXT: .LBB45_5: # %entry ; RV32IFD-NEXT: mv a7, a1 -; RV32IFD-NEXT: bgez a3, .LBB45_22 +; RV32IFD-NEXT: bgez a3, .LBB45_20 ; RV32IFD-NEXT: .LBB45_6: # %entry -; RV32IFD-NEXT: bgeu a1, a6, .LBB45_23 +; RV32IFD-NEXT: bgeu a1, a6, .LBB45_21 ; RV32IFD-NEXT: .LBB45_7: # %entry -; RV32IFD-NEXT: bnez t0, .LBB45_24 +; RV32IFD-NEXT: bnez t0, .LBB45_22 ; RV32IFD-NEXT: .LBB45_8: # %entry ; RV32IFD-NEXT: li a6, 0 -; RV32IFD-NEXT: bnez a3, .LBB45_25 +; RV32IFD-NEXT: bnez a3, .LBB45_23 ; RV32IFD-NEXT: .LBB45_9: # %entry -; RV32IFD-NEXT: bgez a3, .LBB45_26 +; RV32IFD-NEXT: bgez a3, .LBB45_24 ; RV32IFD-NEXT: .LBB45_10: # %entry -; RV32IFD-NEXT: mv a7, a5 -; RV32IFD-NEXT: bgeu a4, a1, .LBB45_27 -; RV32IFD-NEXT: .LBB45_11: # %entry ; RV32IFD-NEXT: mv a0, a5 -; RV32IFD-NEXT: bne a1, a4, .LBB45_28 +; RV32IFD-NEXT: bne a1, a4, .LBB45_25 +; RV32IFD-NEXT: .LBB45_11: # %entry +; RV32IFD-NEXT: and a6, a6, a3 +; RV32IFD-NEXT: bne a6, a2, .LBB45_26 ; RV32IFD-NEXT: .LBB45_12: # %entry -; RV32IFD-NEXT: bltz a3, .LBB45_29 +; RV32IFD-NEXT: mv a5, a1 +; RV32IFD-NEXT: bltz a3, .LBB45_27 ; RV32IFD-NEXT: .LBB45_13: # %entry -; RV32IFD-NEXT: and a6, a6, a3 -; RV32IFD-NEXT: bne a6, a2, .LBB45_30 +; RV32IFD-NEXT: bgeu a4, a1, .LBB45_28 ; RV32IFD-NEXT: .LBB45_14: # %entry -; RV32IFD-NEXT: mv a5, a1 -; RV32IFD-NEXT: bltz a3, .LBB45_31 +; RV32IFD-NEXT: beq a6, a2, .LBB45_16 ; RV32IFD-NEXT: .LBB45_15: # %entry -; RV32IFD-NEXT: bgeu a4, a1, .LBB45_32 -; RV32IFD-NEXT: .LBB45_16: # %entry -; RV32IFD-NEXT: beq a6, a2, .LBB45_18 -; RV32IFD-NEXT: .LBB45_17: # %entry ; RV32IFD-NEXT: mv a1, a5 -; RV32IFD-NEXT: .LBB45_18: # %entry +; RV32IFD-NEXT: .LBB45_16: # %entry ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret -; RV32IFD-NEXT: .LBB45_19: # %entry +; RV32IFD-NEXT: .LBB45_17: # %entry ; RV32IFD-NEXT: li t0, -1 ; RV32IFD-NEXT: lw a0, 16(sp) ; RV32IFD-NEXT: beq a1, a6, .LBB45_4 -; RV32IFD-NEXT: .LBB45_20: # %entry +; RV32IFD-NEXT: .LBB45_18: # %entry ; RV32IFD-NEXT: mv a5, t0 ; RV32IFD-NEXT: or t0, a0, a3 ; RV32IFD-NEXT: beqz t0, .LBB45_5 -; RV32IFD-NEXT: .LBB45_21: # %entry +; RV32IFD-NEXT: .LBB45_19: # %entry ; RV32IFD-NEXT: mv a5, a7 ; RV32IFD-NEXT: mv a7, a1 ; RV32IFD-NEXT: bltz a3, .LBB45_6 -; RV32IFD-NEXT: .LBB45_22: # %entry +; RV32IFD-NEXT: .LBB45_20: # %entry ; RV32IFD-NEXT: mv a7, a6 ; RV32IFD-NEXT: bltu a1, a6, .LBB45_7 -; RV32IFD-NEXT: .LBB45_23: # %entry +; RV32IFD-NEXT: .LBB45_21: # %entry ; RV32IFD-NEXT: mv a1, a6 ; RV32IFD-NEXT: beqz t0, .LBB45_8 -; RV32IFD-NEXT: .LBB45_24: # %entry +; RV32IFD-NEXT: .LBB45_22: # %entry ; RV32IFD-NEXT: mv a1, a7 ; RV32IFD-NEXT: li a6, 0 ; RV32IFD-NEXT: beqz a3, .LBB45_9 -; RV32IFD-NEXT: .LBB45_25: # %entry +; RV32IFD-NEXT: .LBB45_23: # %entry ; RV32IFD-NEXT: srai a6, a3, 31 ; RV32IFD-NEXT: and a6, a6, a0 ; RV32IFD-NEXT: bltz a3, .LBB45_10 -; RV32IFD-NEXT: .LBB45_26: # %entry +; RV32IFD-NEXT: .LBB45_24: # %entry ; RV32IFD-NEXT: li a3, 0 -; RV32IFD-NEXT: mv a7, a5 -; RV32IFD-NEXT: bltu a4, a1, .LBB45_11 -; RV32IFD-NEXT: .LBB45_27: # %entry -; RV32IFD-NEXT: li a7, 0 ; RV32IFD-NEXT: mv a0, a5 
-; RV32IFD-NEXT: beq a1, a4, .LBB45_12 -; RV32IFD-NEXT: .LBB45_28: # %entry -; RV32IFD-NEXT: mv a0, a7 -; RV32IFD-NEXT: bgez a3, .LBB45_13 -; RV32IFD-NEXT: .LBB45_29: # %entry -; RV32IFD-NEXT: li a5, 0 +; RV32IFD-NEXT: beq a1, a4, .LBB45_11 +; RV32IFD-NEXT: .LBB45_25: # %entry +; RV32IFD-NEXT: sltu a0, a4, a1 +; RV32IFD-NEXT: and a0, a0, a5 ; RV32IFD-NEXT: and a6, a6, a3 -; RV32IFD-NEXT: beq a6, a2, .LBB45_14 -; RV32IFD-NEXT: .LBB45_30: # %entry -; RV32IFD-NEXT: mv a0, a5 +; RV32IFD-NEXT: beq a6, a2, .LBB45_12 +; RV32IFD-NEXT: .LBB45_26: # %entry +; RV32IFD-NEXT: slt a0, a2, a3 +; RV32IFD-NEXT: and a0, a0, a5 ; RV32IFD-NEXT: mv a5, a1 -; RV32IFD-NEXT: bgez a3, .LBB45_15 -; RV32IFD-NEXT: .LBB45_31: # %entry +; RV32IFD-NEXT: bgez a3, .LBB45_13 +; RV32IFD-NEXT: .LBB45_27: # %entry ; RV32IFD-NEXT: lui a5, 524288 -; RV32IFD-NEXT: bltu a4, a1, .LBB45_16 -; RV32IFD-NEXT: .LBB45_32: # %entry +; RV32IFD-NEXT: bltu a4, a1, .LBB45_14 +; RV32IFD-NEXT: .LBB45_28: # %entry ; RV32IFD-NEXT: lui a1, 524288 -; RV32IFD-NEXT: bne a6, a2, .LBB45_17 -; RV32IFD-NEXT: j .LBB45_18 +; RV32IFD-NEXT: bne a6, a2, .LBB45_15 +; RV32IFD-NEXT: j .LBB45_16 ; ; RV64IFD-LABEL: stest_f64i64_mm: ; RV64IFD: # %bb.0: # %entry @@ -3419,40 +3273,21 @@ ; RV32IF-NEXT: mv a1, a0 ; RV32IF-NEXT: addi a0, sp, 8 ; RV32IF-NEXT: call __fixunsdfti@plt -; RV32IF-NEXT: lw a0, 20(sp) -; RV32IF-NEXT: lw a3, 16(sp) +; RV32IF-NEXT: lw a2, 16(sp) +; RV32IF-NEXT: lw a3, 20(sp) +; RV32IF-NEXT: xori a0, a2, 1 +; RV32IF-NEXT: or a4, a0, a3 +; RV32IF-NEXT: li a0, 0 ; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: beqz a0, .LBB46_3 +; RV32IF-NEXT: beqz a4, .LBB46_2 ; RV32IF-NEXT: # %bb.1: # %entry -; RV32IF-NEXT: mv a2, a1 -; RV32IF-NEXT: beq a2, a1, .LBB46_4 -; RV32IF-NEXT: .LBB46_2: -; RV32IF-NEXT: lw a4, 8(sp) -; RV32IF-NEXT: j .LBB46_5 -; RV32IF-NEXT: .LBB46_3: -; RV32IF-NEXT: seqz a2, a3 -; RV32IF-NEXT: bne a2, a1, .LBB46_2 -; RV32IF-NEXT: .LBB46_4: # %entry -; RV32IF-NEXT: mv a4, a1 -; RV32IF-NEXT: .LBB46_5: # %entry -; RV32IF-NEXT: xori a3, a3, 1 -; RV32IF-NEXT: or a3, a3, a0 -; RV32IF-NEXT: mv a0, a1 -; RV32IF-NEXT: beq a3, a1, .LBB46_7 -; RV32IF-NEXT: # %bb.6: # %entry -; RV32IF-NEXT: mv a0, a4 -; RV32IF-NEXT: .LBB46_7: # %entry -; RV32IF-NEXT: bne a2, a1, .LBB46_9 -; RV32IF-NEXT: # %bb.8: # %entry -; RV32IF-NEXT: mv a2, a1 -; RV32IF-NEXT: bne a3, a1, .LBB46_10 -; RV32IF-NEXT: j .LBB46_11 -; RV32IF-NEXT: .LBB46_9: -; RV32IF-NEXT: lw a2, 12(sp) -; RV32IF-NEXT: beq a3, a1, .LBB46_11 -; RV32IF-NEXT: .LBB46_10: # %entry -; RV32IF-NEXT: mv a1, a2 -; RV32IF-NEXT: .LBB46_11: # %entry +; RV32IF-NEXT: lw a0, 8(sp) +; RV32IF-NEXT: lw a1, 12(sp) +; RV32IF-NEXT: or a2, a3, a2 +; RV32IF-NEXT: seqz a2, a2 +; RV32IF-NEXT: and a0, a2, a0 +; RV32IF-NEXT: and a1, a2, a1 +; RV32IF-NEXT: .LBB46_2: # %entry ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret @@ -3464,17 +3299,14 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixunsdfti@plt +; RV64-NEXT: li a3, 1 ; RV64-NEXT: mv a2, a0 ; RV64-NEXT: li a0, 0 -; RV64-NEXT: beqz a1, .LBB46_2 +; RV64-NEXT: beq a1, a3, .LBB46_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: mv a2, a0 +; RV64-NEXT: seqz a0, a1 +; RV64-NEXT: and a0, a0, a2 ; RV64-NEXT: .LBB46_2: # %entry -; RV64-NEXT: li a3, 1 -; RV64-NEXT: beq a1, a3, .LBB46_4 -; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: .LBB46_4: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -3487,40 +3319,21 
@@ ; RV32IFD-NEXT: .cfi_offset ra, -4 ; RV32IFD-NEXT: addi a0, sp, 8 ; RV32IFD-NEXT: call __fixunsdfti@plt -; RV32IFD-NEXT: lw a0, 20(sp) -; RV32IFD-NEXT: lw a3, 16(sp) +; RV32IFD-NEXT: lw a2, 16(sp) +; RV32IFD-NEXT: lw a3, 20(sp) +; RV32IFD-NEXT: xori a0, a2, 1 +; RV32IFD-NEXT: or a4, a0, a3 +; RV32IFD-NEXT: li a0, 0 ; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: beqz a0, .LBB46_3 +; RV32IFD-NEXT: beqz a4, .LBB46_2 ; RV32IFD-NEXT: # %bb.1: # %entry -; RV32IFD-NEXT: mv a2, a1 -; RV32IFD-NEXT: beq a2, a1, .LBB46_4 -; RV32IFD-NEXT: .LBB46_2: -; RV32IFD-NEXT: lw a4, 8(sp) -; RV32IFD-NEXT: j .LBB46_5 -; RV32IFD-NEXT: .LBB46_3: -; RV32IFD-NEXT: seqz a2, a3 -; RV32IFD-NEXT: bne a2, a1, .LBB46_2 -; RV32IFD-NEXT: .LBB46_4: # %entry -; RV32IFD-NEXT: mv a4, a1 -; RV32IFD-NEXT: .LBB46_5: # %entry -; RV32IFD-NEXT: xori a3, a3, 1 -; RV32IFD-NEXT: or a3, a3, a0 -; RV32IFD-NEXT: mv a0, a1 -; RV32IFD-NEXT: beq a3, a1, .LBB46_7 -; RV32IFD-NEXT: # %bb.6: # %entry -; RV32IFD-NEXT: mv a0, a4 -; RV32IFD-NEXT: .LBB46_7: # %entry -; RV32IFD-NEXT: bne a2, a1, .LBB46_9 -; RV32IFD-NEXT: # %bb.8: # %entry -; RV32IFD-NEXT: mv a2, a1 -; RV32IFD-NEXT: bne a3, a1, .LBB46_10 -; RV32IFD-NEXT: j .LBB46_11 -; RV32IFD-NEXT: .LBB46_9: -; RV32IFD-NEXT: lw a2, 12(sp) -; RV32IFD-NEXT: beq a3, a1, .LBB46_11 -; RV32IFD-NEXT: .LBB46_10: # %entry -; RV32IFD-NEXT: mv a1, a2 -; RV32IFD-NEXT: .LBB46_11: # %entry +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: or a2, a3, a2 +; RV32IFD-NEXT: seqz a2, a2 +; RV32IFD-NEXT: and a0, a2, a0 +; RV32IFD-NEXT: and a1, a2, a1 +; RV32IFD-NEXT: .LBB46_2: # %entry ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret @@ -3544,105 +3357,70 @@ ; RV32IF-NEXT: call __fixdfti@plt ; RV32IF-NEXT: lw a0, 16(sp) ; RV32IF-NEXT: lw a2, 20(sp) -; RV32IF-NEXT: li a1, 1 -; RV32IF-NEXT: mv a4, a0 -; RV32IF-NEXT: bgez a2, .LBB47_5 +; RV32IF-NEXT: li a4, 1 +; RV32IF-NEXT: mv a1, a0 +; RV32IF-NEXT: bgez a2, .LBB47_4 ; RV32IF-NEXT: # %bb.1: # %entry ; RV32IF-NEXT: mv a3, a0 -; RV32IF-NEXT: bgeu a0, a1, .LBB47_6 +; RV32IF-NEXT: bgeu a0, a4, .LBB47_5 ; RV32IF-NEXT: .LBB47_2: # %entry -; RV32IF-NEXT: beqz a2, .LBB47_7 +; RV32IF-NEXT: beqz a2, .LBB47_6 ; RV32IF-NEXT: .LBB47_3: # %entry -; RV32IF-NEXT: slti a1, a2, 0 -; RV32IF-NEXT: mv a3, a4 -; RV32IF-NEXT: beqz a1, .LBB47_8 -; RV32IF-NEXT: .LBB47_4: -; RV32IF-NEXT: lw a5, 8(sp) -; RV32IF-NEXT: j .LBB47_9 -; RV32IF-NEXT: .LBB47_5: # %entry -; RV32IF-NEXT: li a4, 1 +; RV32IF-NEXT: slti a5, a2, 0 +; RV32IF-NEXT: mv a3, a1 +; RV32IF-NEXT: j .LBB47_7 +; RV32IF-NEXT: .LBB47_4: # %entry +; RV32IF-NEXT: li a1, 1 ; RV32IF-NEXT: mv a3, a0 -; RV32IF-NEXT: bltu a0, a1, .LBB47_2 -; RV32IF-NEXT: .LBB47_6: # %entry +; RV32IF-NEXT: bltu a0, a4, .LBB47_2 +; RV32IF-NEXT: .LBB47_5: # %entry ; RV32IF-NEXT: li a3, 1 ; RV32IF-NEXT: bnez a2, .LBB47_3 -; RV32IF-NEXT: .LBB47_7: -; RV32IF-NEXT: seqz a1, a0 -; RV32IF-NEXT: bnez a1, .LBB47_4 -; RV32IF-NEXT: .LBB47_8: # %entry -; RV32IF-NEXT: li a5, 0 -; RV32IF-NEXT: .LBB47_9: # %entry +; RV32IF-NEXT: .LBB47_6: +; RV32IF-NEXT: seqz a5, a0 +; RV32IF-NEXT: .LBB47_7: # %entry ; RV32IF-NEXT: xori a0, a0, 1 ; RV32IF-NEXT: or a0, a0, a2 ; RV32IF-NEXT: li a4, 0 -; RV32IF-NEXT: beqz a0, .LBB47_11 -; RV32IF-NEXT: # %bb.10: # %entry -; RV32IF-NEXT: mv a4, a5 -; RV32IF-NEXT: .LBB47_11: # %entry -; RV32IF-NEXT: bnez a1, .LBB47_13 -; RV32IF-NEXT: # %bb.12: # %entry -; RV32IF-NEXT: li a5, 0 ; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: bnez a0, .LBB47_14 -; RV32IF-NEXT: j .LBB47_15 -; 
RV32IF-NEXT: .LBB47_13: -; RV32IF-NEXT: lw a5, 12(sp) -; RV32IF-NEXT: li a1, 0 -; RV32IF-NEXT: beqz a0, .LBB47_15 -; RV32IF-NEXT: .LBB47_14: # %entry -; RV32IF-NEXT: mv a1, a5 -; RV32IF-NEXT: .LBB47_15: # %entry -; RV32IF-NEXT: bgez a2, .LBB47_20 -; RV32IF-NEXT: # %bb.16: # %entry -; RV32IF-NEXT: mv a5, a4 -; RV32IF-NEXT: beqz a1, .LBB47_21 -; RV32IF-NEXT: .LBB47_17: # %entry +; RV32IF-NEXT: bnez a0, .LBB47_12 +; RV32IF-NEXT: # %bb.8: # %entry +; RV32IF-NEXT: bgez a2, .LBB47_13 +; RV32IF-NEXT: .LBB47_9: # %entry ; RV32IF-NEXT: mv a0, a4 -; RV32IF-NEXT: bnez a1, .LBB47_22 -; RV32IF-NEXT: .LBB47_18: # %entry -; RV32IF-NEXT: beqz a2, .LBB47_23 -; RV32IF-NEXT: .LBB47_19: # %entry +; RV32IF-NEXT: bnez a1, .LBB47_14 +; RV32IF-NEXT: .LBB47_10: # %entry +; RV32IF-NEXT: beqz a2, .LBB47_15 +; RV32IF-NEXT: .LBB47_11: # %entry ; RV32IF-NEXT: sgtz a5, a2 -; RV32IF-NEXT: beqz a5, .LBB47_24 -; RV32IF-NEXT: j .LBB47_25 -; RV32IF-NEXT: .LBB47_20: # %entry +; RV32IF-NEXT: or a2, a3, a2 +; RV32IF-NEXT: bnez a2, .LBB47_16 +; RV32IF-NEXT: j .LBB47_17 +; RV32IF-NEXT: .LBB47_12: # %entry +; RV32IF-NEXT: lw a0, 8(sp) +; RV32IF-NEXT: lw a1, 12(sp) +; RV32IF-NEXT: and a4, a5, a0 +; RV32IF-NEXT: and a1, a5, a1 +; RV32IF-NEXT: bltz a2, .LBB47_9 +; RV32IF-NEXT: .LBB47_13: # %entry ; RV32IF-NEXT: li a2, 0 -; RV32IF-NEXT: mv a5, a4 -; RV32IF-NEXT: bnez a1, .LBB47_17 -; RV32IF-NEXT: .LBB47_21: # %entry -; RV32IF-NEXT: li a5, 0 ; RV32IF-NEXT: mv a0, a4 -; RV32IF-NEXT: beqz a1, .LBB47_18 -; RV32IF-NEXT: .LBB47_22: # %entry -; RV32IF-NEXT: mv a0, a5 -; RV32IF-NEXT: bnez a2, .LBB47_19 -; RV32IF-NEXT: .LBB47_23: +; RV32IF-NEXT: beqz a1, .LBB47_10 +; RV32IF-NEXT: .LBB47_14: # %entry +; RV32IF-NEXT: snez a0, a1 +; RV32IF-NEXT: and a0, a0, a4 +; RV32IF-NEXT: bnez a2, .LBB47_11 +; RV32IF-NEXT: .LBB47_15: ; RV32IF-NEXT: snez a5, a3 -; RV32IF-NEXT: bnez a5, .LBB47_25 -; RV32IF-NEXT: .LBB47_24: # %entry -; RV32IF-NEXT: li a4, 0 -; RV32IF-NEXT: .LBB47_25: # %entry ; RV32IF-NEXT: or a2, a3, a2 -; RV32IF-NEXT: bnez a2, .LBB47_30 -; RV32IF-NEXT: # %bb.26: # %entry -; RV32IF-NEXT: mv a3, a1 -; RV32IF-NEXT: beqz a5, .LBB47_31 -; RV32IF-NEXT: .LBB47_27: # %entry -; RV32IF-NEXT: beqz a2, .LBB47_29 -; RV32IF-NEXT: .LBB47_28: # %entry -; RV32IF-NEXT: mv a1, a3 -; RV32IF-NEXT: .LBB47_29: # %entry +; RV32IF-NEXT: beqz a2, .LBB47_17 +; RV32IF-NEXT: .LBB47_16: # %entry +; RV32IF-NEXT: and a0, a5, a4 +; RV32IF-NEXT: and a1, a5, a1 +; RV32IF-NEXT: .LBB47_17: # %entry ; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 32 ; RV32IF-NEXT: ret -; RV32IF-NEXT: .LBB47_30: # %entry -; RV32IF-NEXT: mv a0, a4 -; RV32IF-NEXT: mv a3, a1 -; RV32IF-NEXT: bnez a5, .LBB47_27 -; RV32IF-NEXT: .LBB47_31: # %entry -; RV32IF-NEXT: li a3, 0 -; RV32IF-NEXT: bnez a2, .LBB47_28 -; RV32IF-NEXT: j .LBB47_29 ; ; RV64-LABEL: ustest_f64i64_mm: ; RV64: # %bb.0: # %entry @@ -3654,34 +3432,28 @@ ; RV64-NEXT: mv a2, a0 ; RV64-NEXT: li a4, 1 ; RV64-NEXT: mv a3, a1 -; RV64-NEXT: bgtz a1, .LBB47_6 +; RV64-NEXT: bgtz a1, .LBB47_5 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: li a0, 0 -; RV64-NEXT: bne a1, a4, .LBB47_7 +; RV64-NEXT: bne a1, a4, .LBB47_6 ; RV64-NEXT: .LBB47_2: # %entry -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: blez a3, .LBB47_8 +; RV64-NEXT: beqz a3, .LBB47_4 ; RV64-NEXT: .LBB47_3: # %entry -; RV64-NEXT: beqz a3, .LBB47_5 +; RV64-NEXT: sgtz a1, a3 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: .LBB47_4: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB47_5: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; 
RV64-NEXT: ret -; RV64-NEXT: .LBB47_6: # %entry -; RV64-NEXT: li a2, 0 +; RV64-NEXT: .LBB47_5: # %entry ; RV64-NEXT: li a3, 1 ; RV64-NEXT: li a0, 0 ; RV64-NEXT: beq a1, a4, .LBB47_2 -; RV64-NEXT: .LBB47_7: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: bgtz a3, .LBB47_3 -; RV64-NEXT: .LBB47_8: # %entry -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bnez a3, .LBB47_4 -; RV64-NEXT: j .LBB47_5 +; RV64-NEXT: .LBB47_6: # %entry +; RV64-NEXT: slti a0, a1, 1 +; RV64-NEXT: and a0, a0, a2 +; RV64-NEXT: bnez a3, .LBB47_3 +; RV64-NEXT: j .LBB47_4 ; ; RV32IFD-LABEL: ustest_f64i64_mm: ; RV32IFD: # %bb.0: # %entry @@ -3693,105 +3465,70 @@ ; RV32IFD-NEXT: call __fixdfti@plt ; RV32IFD-NEXT: lw a0, 16(sp) ; RV32IFD-NEXT: lw a2, 20(sp) -; RV32IFD-NEXT: li a1, 1 -; RV32IFD-NEXT: mv a4, a0 -; RV32IFD-NEXT: bgez a2, .LBB47_5 +; RV32IFD-NEXT: li a4, 1 +; RV32IFD-NEXT: mv a1, a0 +; RV32IFD-NEXT: bgez a2, .LBB47_4 ; RV32IFD-NEXT: # %bb.1: # %entry ; RV32IFD-NEXT: mv a3, a0 -; RV32IFD-NEXT: bgeu a0, a1, .LBB47_6 +; RV32IFD-NEXT: bgeu a0, a4, .LBB47_5 ; RV32IFD-NEXT: .LBB47_2: # %entry -; RV32IFD-NEXT: beqz a2, .LBB47_7 +; RV32IFD-NEXT: beqz a2, .LBB47_6 ; RV32IFD-NEXT: .LBB47_3: # %entry -; RV32IFD-NEXT: slti a1, a2, 0 -; RV32IFD-NEXT: mv a3, a4 -; RV32IFD-NEXT: beqz a1, .LBB47_8 -; RV32IFD-NEXT: .LBB47_4: -; RV32IFD-NEXT: lw a5, 8(sp) -; RV32IFD-NEXT: j .LBB47_9 -; RV32IFD-NEXT: .LBB47_5: # %entry -; RV32IFD-NEXT: li a4, 1 +; RV32IFD-NEXT: slti a5, a2, 0 +; RV32IFD-NEXT: mv a3, a1 +; RV32IFD-NEXT: j .LBB47_7 +; RV32IFD-NEXT: .LBB47_4: # %entry +; RV32IFD-NEXT: li a1, 1 ; RV32IFD-NEXT: mv a3, a0 -; RV32IFD-NEXT: bltu a0, a1, .LBB47_2 -; RV32IFD-NEXT: .LBB47_6: # %entry +; RV32IFD-NEXT: bltu a0, a4, .LBB47_2 +; RV32IFD-NEXT: .LBB47_5: # %entry ; RV32IFD-NEXT: li a3, 1 ; RV32IFD-NEXT: bnez a2, .LBB47_3 -; RV32IFD-NEXT: .LBB47_7: -; RV32IFD-NEXT: seqz a1, a0 -; RV32IFD-NEXT: bnez a1, .LBB47_4 -; RV32IFD-NEXT: .LBB47_8: # %entry -; RV32IFD-NEXT: li a5, 0 -; RV32IFD-NEXT: .LBB47_9: # %entry +; RV32IFD-NEXT: .LBB47_6: +; RV32IFD-NEXT: seqz a5, a0 +; RV32IFD-NEXT: .LBB47_7: # %entry ; RV32IFD-NEXT: xori a0, a0, 1 ; RV32IFD-NEXT: or a0, a0, a2 ; RV32IFD-NEXT: li a4, 0 -; RV32IFD-NEXT: beqz a0, .LBB47_11 -; RV32IFD-NEXT: # %bb.10: # %entry -; RV32IFD-NEXT: mv a4, a5 -; RV32IFD-NEXT: .LBB47_11: # %entry -; RV32IFD-NEXT: bnez a1, .LBB47_13 -; RV32IFD-NEXT: # %bb.12: # %entry -; RV32IFD-NEXT: li a5, 0 -; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: bnez a0, .LBB47_14 -; RV32IFD-NEXT: j .LBB47_15 -; RV32IFD-NEXT: .LBB47_13: -; RV32IFD-NEXT: lw a5, 12(sp) ; RV32IFD-NEXT: li a1, 0 -; RV32IFD-NEXT: beqz a0, .LBB47_15 -; RV32IFD-NEXT: .LBB47_14: # %entry -; RV32IFD-NEXT: mv a1, a5 -; RV32IFD-NEXT: .LBB47_15: # %entry -; RV32IFD-NEXT: bgez a2, .LBB47_20 -; RV32IFD-NEXT: # %bb.16: # %entry -; RV32IFD-NEXT: mv a5, a4 -; RV32IFD-NEXT: beqz a1, .LBB47_21 -; RV32IFD-NEXT: .LBB47_17: # %entry +; RV32IFD-NEXT: bnez a0, .LBB47_12 +; RV32IFD-NEXT: # %bb.8: # %entry +; RV32IFD-NEXT: bgez a2, .LBB47_13 +; RV32IFD-NEXT: .LBB47_9: # %entry ; RV32IFD-NEXT: mv a0, a4 -; RV32IFD-NEXT: bnez a1, .LBB47_22 -; RV32IFD-NEXT: .LBB47_18: # %entry -; RV32IFD-NEXT: beqz a2, .LBB47_23 -; RV32IFD-NEXT: .LBB47_19: # %entry +; RV32IFD-NEXT: bnez a1, .LBB47_14 +; RV32IFD-NEXT: .LBB47_10: # %entry +; RV32IFD-NEXT: beqz a2, .LBB47_15 +; RV32IFD-NEXT: .LBB47_11: # %entry ; RV32IFD-NEXT: sgtz a5, a2 -; RV32IFD-NEXT: beqz a5, .LBB47_24 -; RV32IFD-NEXT: j .LBB47_25 -; RV32IFD-NEXT: .LBB47_20: # %entry +; RV32IFD-NEXT: or a2, a3, a2 +; RV32IFD-NEXT: bnez a2, 
.LBB47_16 +; RV32IFD-NEXT: j .LBB47_17 +; RV32IFD-NEXT: .LBB47_12: # %entry +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: and a4, a5, a0 +; RV32IFD-NEXT: and a1, a5, a1 +; RV32IFD-NEXT: bltz a2, .LBB47_9 +; RV32IFD-NEXT: .LBB47_13: # %entry ; RV32IFD-NEXT: li a2, 0 -; RV32IFD-NEXT: mv a5, a4 -; RV32IFD-NEXT: bnez a1, .LBB47_17 -; RV32IFD-NEXT: .LBB47_21: # %entry -; RV32IFD-NEXT: li a5, 0 ; RV32IFD-NEXT: mv a0, a4 -; RV32IFD-NEXT: beqz a1, .LBB47_18 -; RV32IFD-NEXT: .LBB47_22: # %entry -; RV32IFD-NEXT: mv a0, a5 -; RV32IFD-NEXT: bnez a2, .LBB47_19 -; RV32IFD-NEXT: .LBB47_23: +; RV32IFD-NEXT: beqz a1, .LBB47_10 +; RV32IFD-NEXT: .LBB47_14: # %entry +; RV32IFD-NEXT: snez a0, a1 +; RV32IFD-NEXT: and a0, a0, a4 +; RV32IFD-NEXT: bnez a2, .LBB47_11 +; RV32IFD-NEXT: .LBB47_15: ; RV32IFD-NEXT: snez a5, a3 -; RV32IFD-NEXT: bnez a5, .LBB47_25 -; RV32IFD-NEXT: .LBB47_24: # %entry -; RV32IFD-NEXT: li a4, 0 -; RV32IFD-NEXT: .LBB47_25: # %entry ; RV32IFD-NEXT: or a2, a3, a2 -; RV32IFD-NEXT: bnez a2, .LBB47_30 -; RV32IFD-NEXT: # %bb.26: # %entry -; RV32IFD-NEXT: mv a3, a1 -; RV32IFD-NEXT: beqz a5, .LBB47_31 -; RV32IFD-NEXT: .LBB47_27: # %entry -; RV32IFD-NEXT: beqz a2, .LBB47_29 -; RV32IFD-NEXT: .LBB47_28: # %entry -; RV32IFD-NEXT: mv a1, a3 -; RV32IFD-NEXT: .LBB47_29: # %entry +; RV32IFD-NEXT: beqz a2, .LBB47_17 +; RV32IFD-NEXT: .LBB47_16: # %entry +; RV32IFD-NEXT: and a0, a5, a4 +; RV32IFD-NEXT: and a1, a5, a1 +; RV32IFD-NEXT: .LBB47_17: # %entry ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret -; RV32IFD-NEXT: .LBB47_30: # %entry -; RV32IFD-NEXT: mv a0, a4 -; RV32IFD-NEXT: mv a3, a1 -; RV32IFD-NEXT: bnez a5, .LBB47_27 -; RV32IFD-NEXT: .LBB47_31: # %entry -; RV32IFD-NEXT: li a3, 0 -; RV32IFD-NEXT: bnez a2, .LBB47_28 -; RV32IFD-NEXT: j .LBB47_29 entry: %conv = fptosi double %x to i128 %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) @@ -3821,101 +3558,91 @@ ; RV32-NEXT: lui a4, 524288 ; RV32-NEXT: addi a6, a4, -1 ; RV32-NEXT: mv t0, a5 -; RV32-NEXT: bgeu a1, a6, .LBB48_19 +; RV32-NEXT: bgeu a1, a6, .LBB48_17 ; RV32-NEXT: # %bb.3: # %entry ; RV32-NEXT: lw a0, 16(sp) -; RV32-NEXT: bne a1, a6, .LBB48_20 +; RV32-NEXT: bne a1, a6, .LBB48_18 ; RV32-NEXT: .LBB48_4: # %entry ; RV32-NEXT: or t0, a0, a3 -; RV32-NEXT: bnez t0, .LBB48_21 +; RV32-NEXT: bnez t0, .LBB48_19 ; RV32-NEXT: .LBB48_5: # %entry ; RV32-NEXT: mv a7, a1 -; RV32-NEXT: bgez a3, .LBB48_22 +; RV32-NEXT: bgez a3, .LBB48_20 ; RV32-NEXT: .LBB48_6: # %entry -; RV32-NEXT: bgeu a1, a6, .LBB48_23 +; RV32-NEXT: bgeu a1, a6, .LBB48_21 ; RV32-NEXT: .LBB48_7: # %entry -; RV32-NEXT: bnez t0, .LBB48_24 +; RV32-NEXT: bnez t0, .LBB48_22 ; RV32-NEXT: .LBB48_8: # %entry ; RV32-NEXT: li a6, 0 -; RV32-NEXT: bnez a3, .LBB48_25 +; RV32-NEXT: bnez a3, .LBB48_23 ; RV32-NEXT: .LBB48_9: # %entry -; RV32-NEXT: bgez a3, .LBB48_26 +; RV32-NEXT: bgez a3, .LBB48_24 ; RV32-NEXT: .LBB48_10: # %entry -; RV32-NEXT: mv a7, a5 -; RV32-NEXT: bgeu a4, a1, .LBB48_27 -; RV32-NEXT: .LBB48_11: # %entry ; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bne a1, a4, .LBB48_28 +; RV32-NEXT: bne a1, a4, .LBB48_25 +; RV32-NEXT: .LBB48_11: # %entry +; RV32-NEXT: and a6, a6, a3 +; RV32-NEXT: bne a6, a2, .LBB48_26 ; RV32-NEXT: .LBB48_12: # %entry -; RV32-NEXT: bltz a3, .LBB48_29 +; RV32-NEXT: mv a5, a1 +; RV32-NEXT: bltz a3, .LBB48_27 ; RV32-NEXT: .LBB48_13: # %entry -; RV32-NEXT: and a6, a6, a3 -; RV32-NEXT: bne a6, a2, .LBB48_30 +; RV32-NEXT: bgeu a4, a1, .LBB48_28 ; RV32-NEXT: 
.LBB48_14: # %entry -; RV32-NEXT: mv a5, a1 -; RV32-NEXT: bltz a3, .LBB48_31 +; RV32-NEXT: beq a6, a2, .LBB48_16 ; RV32-NEXT: .LBB48_15: # %entry -; RV32-NEXT: bgeu a4, a1, .LBB48_32 -; RV32-NEXT: .LBB48_16: # %entry -; RV32-NEXT: beq a6, a2, .LBB48_18 -; RV32-NEXT: .LBB48_17: # %entry ; RV32-NEXT: mv a1, a5 -; RV32-NEXT: .LBB48_18: # %entry +; RV32-NEXT: .LBB48_16: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB48_19: # %entry +; RV32-NEXT: .LBB48_17: # %entry ; RV32-NEXT: li t0, -1 ; RV32-NEXT: lw a0, 16(sp) ; RV32-NEXT: beq a1, a6, .LBB48_4 -; RV32-NEXT: .LBB48_20: # %entry +; RV32-NEXT: .LBB48_18: # %entry ; RV32-NEXT: mv a5, t0 ; RV32-NEXT: or t0, a0, a3 ; RV32-NEXT: beqz t0, .LBB48_5 -; RV32-NEXT: .LBB48_21: # %entry +; RV32-NEXT: .LBB48_19: # %entry ; RV32-NEXT: mv a5, a7 ; RV32-NEXT: mv a7, a1 ; RV32-NEXT: bltz a3, .LBB48_6 -; RV32-NEXT: .LBB48_22: # %entry +; RV32-NEXT: .LBB48_20: # %entry ; RV32-NEXT: mv a7, a6 ; RV32-NEXT: bltu a1, a6, .LBB48_7 -; RV32-NEXT: .LBB48_23: # %entry +; RV32-NEXT: .LBB48_21: # %entry ; RV32-NEXT: mv a1, a6 ; RV32-NEXT: beqz t0, .LBB48_8 -; RV32-NEXT: .LBB48_24: # %entry +; RV32-NEXT: .LBB48_22: # %entry ; RV32-NEXT: mv a1, a7 ; RV32-NEXT: li a6, 0 ; RV32-NEXT: beqz a3, .LBB48_9 -; RV32-NEXT: .LBB48_25: # %entry +; RV32-NEXT: .LBB48_23: # %entry ; RV32-NEXT: srai a6, a3, 31 ; RV32-NEXT: and a6, a6, a0 ; RV32-NEXT: bltz a3, .LBB48_10 -; RV32-NEXT: .LBB48_26: # %entry +; RV32-NEXT: .LBB48_24: # %entry ; RV32-NEXT: li a3, 0 -; RV32-NEXT: mv a7, a5 -; RV32-NEXT: bltu a4, a1, .LBB48_11 -; RV32-NEXT: .LBB48_27: # %entry -; RV32-NEXT: li a7, 0 ; RV32-NEXT: mv a0, a5 -; RV32-NEXT: beq a1, a4, .LBB48_12 -; RV32-NEXT: .LBB48_28: # %entry -; RV32-NEXT: mv a0, a7 -; RV32-NEXT: bgez a3, .LBB48_13 -; RV32-NEXT: .LBB48_29: # %entry -; RV32-NEXT: li a5, 0 +; RV32-NEXT: beq a1, a4, .LBB48_11 +; RV32-NEXT: .LBB48_25: # %entry +; RV32-NEXT: sltu a0, a4, a1 +; RV32-NEXT: and a0, a0, a5 ; RV32-NEXT: and a6, a6, a3 -; RV32-NEXT: beq a6, a2, .LBB48_14 -; RV32-NEXT: .LBB48_30: # %entry -; RV32-NEXT: mv a0, a5 +; RV32-NEXT: beq a6, a2, .LBB48_12 +; RV32-NEXT: .LBB48_26: # %entry +; RV32-NEXT: slt a0, a2, a3 +; RV32-NEXT: and a0, a0, a5 ; RV32-NEXT: mv a5, a1 -; RV32-NEXT: bgez a3, .LBB48_15 -; RV32-NEXT: .LBB48_31: # %entry +; RV32-NEXT: bgez a3, .LBB48_13 +; RV32-NEXT: .LBB48_27: # %entry ; RV32-NEXT: lui a5, 524288 -; RV32-NEXT: bltu a4, a1, .LBB48_16 -; RV32-NEXT: .LBB48_32: # %entry +; RV32-NEXT: bltu a4, a1, .LBB48_14 +; RV32-NEXT: .LBB48_28: # %entry ; RV32-NEXT: lui a1, 524288 -; RV32-NEXT: bne a6, a2, .LBB48_17 -; RV32-NEXT: j .LBB48_18 +; RV32-NEXT: bne a6, a2, .LBB48_15 +; RV32-NEXT: j .LBB48_16 ; ; RV64-LABEL: stest_f32i64_mm: ; RV64: # %bb.0: # %entry @@ -3942,40 +3669,21 @@ ; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixunssfti@plt -; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a3, 16(sp) +; RV32-NEXT: lw a2, 16(sp) +; RV32-NEXT: lw a3, 20(sp) +; RV32-NEXT: xori a0, a2, 1 +; RV32-NEXT: or a4, a0, a3 +; RV32-NEXT: li a0, 0 ; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a0, .LBB49_3 +; RV32-NEXT: beqz a4, .LBB49_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: beq a2, a1, .LBB49_4 -; RV32-NEXT: .LBB49_2: -; RV32-NEXT: lw a4, 8(sp) -; RV32-NEXT: j .LBB49_5 -; RV32-NEXT: .LBB49_3: -; RV32-NEXT: seqz a2, a3 -; RV32-NEXT: bne a2, a1, .LBB49_2 -; RV32-NEXT: .LBB49_4: # %entry -; RV32-NEXT: mv a4, a1 -; RV32-NEXT: .LBB49_5: # %entry -; RV32-NEXT: 
xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a0 -; RV32-NEXT: mv a0, a1 -; RV32-NEXT: beq a3, a1, .LBB49_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: mv a0, a4 -; RV32-NEXT: .LBB49_7: # %entry -; RV32-NEXT: bne a2, a1, .LBB49_9 -; RV32-NEXT: # %bb.8: # %entry -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bne a3, a1, .LBB49_10 -; RV32-NEXT: j .LBB49_11 -; RV32-NEXT: .LBB49_9: -; RV32-NEXT: lw a2, 12(sp) -; RV32-NEXT: beq a3, a1, .LBB49_11 -; RV32-NEXT: .LBB49_10: # %entry -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB49_11: # %entry +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: or a2, a3, a2 +; RV32-NEXT: seqz a2, a2 +; RV32-NEXT: and a0, a2, a0 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: .LBB49_2: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -3987,17 +3695,14 @@ ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call __fixunssfti@plt +; RV64-NEXT: li a3, 1 ; RV64-NEXT: mv a2, a0 ; RV64-NEXT: li a0, 0 -; RV64-NEXT: beqz a1, .LBB49_2 +; RV64-NEXT: beq a1, a3, .LBB49_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: mv a2, a0 +; RV64-NEXT: seqz a0, a1 +; RV64-NEXT: and a0, a0, a2 ; RV64-NEXT: .LBB49_2: # %entry -; RV64-NEXT: li a3, 1 -; RV64-NEXT: beq a1, a3, .LBB49_4 -; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: .LBB49_4: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -4019,105 +3724,70 @@ ; RV32-NEXT: call __fixsfti@plt ; RV32-NEXT: lw a0, 16(sp) ; RV32-NEXT: lw a2, 20(sp) -; RV32-NEXT: li a1, 1 -; RV32-NEXT: mv a4, a0 -; RV32-NEXT: bgez a2, .LBB50_5 +; RV32-NEXT: li a4, 1 +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: bgez a2, .LBB50_4 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a3, a0 -; RV32-NEXT: bgeu a0, a1, .LBB50_6 +; RV32-NEXT: bgeu a0, a4, .LBB50_5 ; RV32-NEXT: .LBB50_2: # %entry -; RV32-NEXT: beqz a2, .LBB50_7 +; RV32-NEXT: beqz a2, .LBB50_6 ; RV32-NEXT: .LBB50_3: # %entry -; RV32-NEXT: slti a1, a2, 0 -; RV32-NEXT: mv a3, a4 -; RV32-NEXT: beqz a1, .LBB50_8 -; RV32-NEXT: .LBB50_4: -; RV32-NEXT: lw a5, 8(sp) -; RV32-NEXT: j .LBB50_9 -; RV32-NEXT: .LBB50_5: # %entry -; RV32-NEXT: li a4, 1 +; RV32-NEXT: slti a5, a2, 0 +; RV32-NEXT: mv a3, a1 +; RV32-NEXT: j .LBB50_7 +; RV32-NEXT: .LBB50_4: # %entry +; RV32-NEXT: li a1, 1 ; RV32-NEXT: mv a3, a0 -; RV32-NEXT: bltu a0, a1, .LBB50_2 -; RV32-NEXT: .LBB50_6: # %entry +; RV32-NEXT: bltu a0, a4, .LBB50_2 +; RV32-NEXT: .LBB50_5: # %entry ; RV32-NEXT: li a3, 1 ; RV32-NEXT: bnez a2, .LBB50_3 -; RV32-NEXT: .LBB50_7: -; RV32-NEXT: seqz a1, a0 -; RV32-NEXT: bnez a1, .LBB50_4 -; RV32-NEXT: .LBB50_8: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: .LBB50_9: # %entry +; RV32-NEXT: .LBB50_6: +; RV32-NEXT: seqz a5, a0 +; RV32-NEXT: .LBB50_7: # %entry ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: or a0, a0, a2 ; RV32-NEXT: li a4, 0 -; RV32-NEXT: beqz a0, .LBB50_11 -; RV32-NEXT: # %bb.10: # %entry -; RV32-NEXT: mv a4, a5 -; RV32-NEXT: .LBB50_11: # %entry -; RV32-NEXT: bnez a1, .LBB50_13 -; RV32-NEXT: # %bb.12: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: li a1, 0 -; RV32-NEXT: bnez a0, .LBB50_14 -; RV32-NEXT: j .LBB50_15 -; RV32-NEXT: .LBB50_13: -; RV32-NEXT: lw a5, 12(sp) ; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a0, .LBB50_15 -; RV32-NEXT: .LBB50_14: # %entry -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: .LBB50_15: # %entry -; RV32-NEXT: bgez a2, .LBB50_20 -; RV32-NEXT: # %bb.16: # %entry -; RV32-NEXT: mv a5, a4 -; RV32-NEXT: beqz a1, .LBB50_21 -; RV32-NEXT: .LBB50_17: # %entry +; 
RV32-NEXT: bnez a0, .LBB50_12 +; RV32-NEXT: # %bb.8: # %entry +; RV32-NEXT: bgez a2, .LBB50_13 +; RV32-NEXT: .LBB50_9: # %entry ; RV32-NEXT: mv a0, a4 -; RV32-NEXT: bnez a1, .LBB50_22 -; RV32-NEXT: .LBB50_18: # %entry -; RV32-NEXT: beqz a2, .LBB50_23 -; RV32-NEXT: .LBB50_19: # %entry +; RV32-NEXT: bnez a1, .LBB50_14 +; RV32-NEXT: .LBB50_10: # %entry +; RV32-NEXT: beqz a2, .LBB50_15 +; RV32-NEXT: .LBB50_11: # %entry ; RV32-NEXT: sgtz a5, a2 -; RV32-NEXT: beqz a5, .LBB50_24 -; RV32-NEXT: j .LBB50_25 -; RV32-NEXT: .LBB50_20: # %entry +; RV32-NEXT: or a2, a3, a2 +; RV32-NEXT: bnez a2, .LBB50_16 +; RV32-NEXT: j .LBB50_17 +; RV32-NEXT: .LBB50_12: # %entry +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: and a4, a5, a0 +; RV32-NEXT: and a1, a5, a1 +; RV32-NEXT: bltz a2, .LBB50_9 +; RV32-NEXT: .LBB50_13: # %entry ; RV32-NEXT: li a2, 0 -; RV32-NEXT: mv a5, a4 -; RV32-NEXT: bnez a1, .LBB50_17 -; RV32-NEXT: .LBB50_21: # %entry -; RV32-NEXT: li a5, 0 ; RV32-NEXT: mv a0, a4 -; RV32-NEXT: beqz a1, .LBB50_18 -; RV32-NEXT: .LBB50_22: # %entry -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bnez a2, .LBB50_19 -; RV32-NEXT: .LBB50_23: +; RV32-NEXT: beqz a1, .LBB50_10 +; RV32-NEXT: .LBB50_14: # %entry +; RV32-NEXT: snez a0, a1 +; RV32-NEXT: and a0, a0, a4 +; RV32-NEXT: bnez a2, .LBB50_11 +; RV32-NEXT: .LBB50_15: ; RV32-NEXT: snez a5, a3 -; RV32-NEXT: bnez a5, .LBB50_25 -; RV32-NEXT: .LBB50_24: # %entry -; RV32-NEXT: li a4, 0 -; RV32-NEXT: .LBB50_25: # %entry ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB50_30 -; RV32-NEXT: # %bb.26: # %entry -; RV32-NEXT: mv a3, a1 -; RV32-NEXT: beqz a5, .LBB50_31 -; RV32-NEXT: .LBB50_27: # %entry -; RV32-NEXT: beqz a2, .LBB50_29 -; RV32-NEXT: .LBB50_28: # %entry -; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB50_29: # %entry +; RV32-NEXT: beqz a2, .LBB50_17 +; RV32-NEXT: .LBB50_16: # %entry +; RV32-NEXT: and a0, a5, a4 +; RV32-NEXT: and a1, a5, a1 +; RV32-NEXT: .LBB50_17: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB50_30: # %entry -; RV32-NEXT: mv a0, a4 -; RV32-NEXT: mv a3, a1 -; RV32-NEXT: bnez a5, .LBB50_27 -; RV32-NEXT: .LBB50_31: # %entry -; RV32-NEXT: li a3, 0 -; RV32-NEXT: bnez a2, .LBB50_28 -; RV32-NEXT: j .LBB50_29 ; ; RV64-LABEL: ustest_f32i64_mm: ; RV64: # %bb.0: # %entry @@ -4129,34 +3799,28 @@ ; RV64-NEXT: mv a2, a0 ; RV64-NEXT: li a4, 1 ; RV64-NEXT: mv a3, a1 -; RV64-NEXT: bgtz a1, .LBB50_6 +; RV64-NEXT: bgtz a1, .LBB50_5 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: li a0, 0 -; RV64-NEXT: bne a1, a4, .LBB50_7 +; RV64-NEXT: bne a1, a4, .LBB50_6 ; RV64-NEXT: .LBB50_2: # %entry -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: blez a3, .LBB50_8 +; RV64-NEXT: beqz a3, .LBB50_4 ; RV64-NEXT: .LBB50_3: # %entry -; RV64-NEXT: beqz a3, .LBB50_5 +; RV64-NEXT: sgtz a1, a3 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: .LBB50_4: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB50_5: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret -; RV64-NEXT: .LBB50_6: # %entry -; RV64-NEXT: li a2, 0 +; RV64-NEXT: .LBB50_5: # %entry ; RV64-NEXT: li a3, 1 ; RV64-NEXT: li a0, 0 ; RV64-NEXT: beq a1, a4, .LBB50_2 -; RV64-NEXT: .LBB50_7: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: bgtz a3, .LBB50_3 -; RV64-NEXT: .LBB50_8: # %entry -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bnez a3, .LBB50_4 -; RV64-NEXT: j .LBB50_5 +; RV64-NEXT: .LBB50_6: # %entry +; RV64-NEXT: slti a0, a1, 1 +; RV64-NEXT: and a0, a0, a2 +; RV64-NEXT: bnez a3, .LBB50_3 +; 
RV64-NEXT: j .LBB50_4 entry: %conv = fptosi float %x to i128 %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) @@ -4188,101 +3852,91 @@ ; RV32-NEXT: lui a4, 524288 ; RV32-NEXT: addi a6, a4, -1 ; RV32-NEXT: mv t0, a5 -; RV32-NEXT: bgeu a1, a6, .LBB51_19 +; RV32-NEXT: bgeu a1, a6, .LBB51_17 ; RV32-NEXT: # %bb.3: # %entry ; RV32-NEXT: lw a0, 16(sp) -; RV32-NEXT: bne a1, a6, .LBB51_20 +; RV32-NEXT: bne a1, a6, .LBB51_18 ; RV32-NEXT: .LBB51_4: # %entry ; RV32-NEXT: or t0, a0, a3 -; RV32-NEXT: bnez t0, .LBB51_21 +; RV32-NEXT: bnez t0, .LBB51_19 ; RV32-NEXT: .LBB51_5: # %entry ; RV32-NEXT: mv a7, a1 -; RV32-NEXT: bgez a3, .LBB51_22 +; RV32-NEXT: bgez a3, .LBB51_20 ; RV32-NEXT: .LBB51_6: # %entry -; RV32-NEXT: bgeu a1, a6, .LBB51_23 +; RV32-NEXT: bgeu a1, a6, .LBB51_21 ; RV32-NEXT: .LBB51_7: # %entry -; RV32-NEXT: bnez t0, .LBB51_24 +; RV32-NEXT: bnez t0, .LBB51_22 ; RV32-NEXT: .LBB51_8: # %entry ; RV32-NEXT: li a6, 0 -; RV32-NEXT: bnez a3, .LBB51_25 +; RV32-NEXT: bnez a3, .LBB51_23 ; RV32-NEXT: .LBB51_9: # %entry -; RV32-NEXT: bgez a3, .LBB51_26 +; RV32-NEXT: bgez a3, .LBB51_24 ; RV32-NEXT: .LBB51_10: # %entry -; RV32-NEXT: mv a7, a5 -; RV32-NEXT: bgeu a4, a1, .LBB51_27 -; RV32-NEXT: .LBB51_11: # %entry ; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bne a1, a4, .LBB51_28 +; RV32-NEXT: bne a1, a4, .LBB51_25 +; RV32-NEXT: .LBB51_11: # %entry +; RV32-NEXT: and a6, a6, a3 +; RV32-NEXT: bne a6, a2, .LBB51_26 ; RV32-NEXT: .LBB51_12: # %entry -; RV32-NEXT: bltz a3, .LBB51_29 +; RV32-NEXT: mv a5, a1 +; RV32-NEXT: bltz a3, .LBB51_27 ; RV32-NEXT: .LBB51_13: # %entry -; RV32-NEXT: and a6, a6, a3 -; RV32-NEXT: bne a6, a2, .LBB51_30 +; RV32-NEXT: bgeu a4, a1, .LBB51_28 ; RV32-NEXT: .LBB51_14: # %entry -; RV32-NEXT: mv a5, a1 -; RV32-NEXT: bltz a3, .LBB51_31 +; RV32-NEXT: beq a6, a2, .LBB51_16 ; RV32-NEXT: .LBB51_15: # %entry -; RV32-NEXT: bgeu a4, a1, .LBB51_32 -; RV32-NEXT: .LBB51_16: # %entry -; RV32-NEXT: beq a6, a2, .LBB51_18 -; RV32-NEXT: .LBB51_17: # %entry ; RV32-NEXT: mv a1, a5 -; RV32-NEXT: .LBB51_18: # %entry +; RV32-NEXT: .LBB51_16: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB51_19: # %entry +; RV32-NEXT: .LBB51_17: # %entry ; RV32-NEXT: li t0, -1 ; RV32-NEXT: lw a0, 16(sp) ; RV32-NEXT: beq a1, a6, .LBB51_4 -; RV32-NEXT: .LBB51_20: # %entry +; RV32-NEXT: .LBB51_18: # %entry ; RV32-NEXT: mv a5, t0 ; RV32-NEXT: or t0, a0, a3 ; RV32-NEXT: beqz t0, .LBB51_5 -; RV32-NEXT: .LBB51_21: # %entry +; RV32-NEXT: .LBB51_19: # %entry ; RV32-NEXT: mv a5, a7 ; RV32-NEXT: mv a7, a1 ; RV32-NEXT: bltz a3, .LBB51_6 -; RV32-NEXT: .LBB51_22: # %entry +; RV32-NEXT: .LBB51_20: # %entry ; RV32-NEXT: mv a7, a6 ; RV32-NEXT: bltu a1, a6, .LBB51_7 -; RV32-NEXT: .LBB51_23: # %entry +; RV32-NEXT: .LBB51_21: # %entry ; RV32-NEXT: mv a1, a6 ; RV32-NEXT: beqz t0, .LBB51_8 -; RV32-NEXT: .LBB51_24: # %entry +; RV32-NEXT: .LBB51_22: # %entry ; RV32-NEXT: mv a1, a7 ; RV32-NEXT: li a6, 0 ; RV32-NEXT: beqz a3, .LBB51_9 -; RV32-NEXT: .LBB51_25: # %entry +; RV32-NEXT: .LBB51_23: # %entry ; RV32-NEXT: srai a6, a3, 31 ; RV32-NEXT: and a6, a6, a0 ; RV32-NEXT: bltz a3, .LBB51_10 -; RV32-NEXT: .LBB51_26: # %entry +; RV32-NEXT: .LBB51_24: # %entry ; RV32-NEXT: li a3, 0 -; RV32-NEXT: mv a7, a5 -; RV32-NEXT: bltu a4, a1, .LBB51_11 -; RV32-NEXT: .LBB51_27: # %entry -; RV32-NEXT: li a7, 0 ; RV32-NEXT: mv a0, a5 -; RV32-NEXT: beq a1, a4, .LBB51_12 -; RV32-NEXT: .LBB51_28: # %entry -; RV32-NEXT: mv a0, a7 -; RV32-NEXT: bgez a3, .LBB51_13 -; 
RV32-NEXT: .LBB51_29: # %entry -; RV32-NEXT: li a5, 0 +; RV32-NEXT: beq a1, a4, .LBB51_11 +; RV32-NEXT: .LBB51_25: # %entry +; RV32-NEXT: sltu a0, a4, a1 +; RV32-NEXT: and a0, a0, a5 ; RV32-NEXT: and a6, a6, a3 -; RV32-NEXT: beq a6, a2, .LBB51_14 -; RV32-NEXT: .LBB51_30: # %entry -; RV32-NEXT: mv a0, a5 +; RV32-NEXT: beq a6, a2, .LBB51_12 +; RV32-NEXT: .LBB51_26: # %entry +; RV32-NEXT: slt a0, a2, a3 +; RV32-NEXT: and a0, a0, a5 ; RV32-NEXT: mv a5, a1 -; RV32-NEXT: bgez a3, .LBB51_15 -; RV32-NEXT: .LBB51_31: # %entry +; RV32-NEXT: bgez a3, .LBB51_13 +; RV32-NEXT: .LBB51_27: # %entry ; RV32-NEXT: lui a5, 524288 -; RV32-NEXT: bltu a4, a1, .LBB51_16 -; RV32-NEXT: .LBB51_32: # %entry +; RV32-NEXT: bltu a4, a1, .LBB51_14 +; RV32-NEXT: .LBB51_28: # %entry ; RV32-NEXT: lui a1, 524288 -; RV32-NEXT: bne a6, a2, .LBB51_17 -; RV32-NEXT: j .LBB51_18 +; RV32-NEXT: bne a6, a2, .LBB51_15 +; RV32-NEXT: j .LBB51_16 ; ; RV64-LABEL: stest_f16i64_mm: ; RV64: # %bb.0: # %entry @@ -4355,40 +4009,21 @@ ; RV32-NEXT: call __extendhfsf2@plt ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: call __fixunssfti@plt -; RV32-NEXT: lw a0, 20(sp) -; RV32-NEXT: lw a3, 16(sp) +; RV32-NEXT: lw a2, 16(sp) +; RV32-NEXT: lw a3, 20(sp) +; RV32-NEXT: xori a0, a2, 1 +; RV32-NEXT: or a4, a0, a3 +; RV32-NEXT: li a0, 0 ; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a0, .LBB52_3 +; RV32-NEXT: beqz a4, .LBB52_2 ; RV32-NEXT: # %bb.1: # %entry -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: beq a2, a1, .LBB52_4 -; RV32-NEXT: .LBB52_2: -; RV32-NEXT: lw a4, 8(sp) -; RV32-NEXT: j .LBB52_5 -; RV32-NEXT: .LBB52_3: -; RV32-NEXT: seqz a2, a3 -; RV32-NEXT: bne a2, a1, .LBB52_2 -; RV32-NEXT: .LBB52_4: # %entry -; RV32-NEXT: mv a4, a1 -; RV32-NEXT: .LBB52_5: # %entry -; RV32-NEXT: xori a3, a3, 1 -; RV32-NEXT: or a3, a3, a0 -; RV32-NEXT: mv a0, a1 -; RV32-NEXT: beq a3, a1, .LBB52_7 -; RV32-NEXT: # %bb.6: # %entry -; RV32-NEXT: mv a0, a4 -; RV32-NEXT: .LBB52_7: # %entry -; RV32-NEXT: bne a2, a1, .LBB52_9 -; RV32-NEXT: # %bb.8: # %entry -; RV32-NEXT: mv a2, a1 -; RV32-NEXT: bne a3, a1, .LBB52_10 -; RV32-NEXT: j .LBB52_11 -; RV32-NEXT: .LBB52_9: -; RV32-NEXT: lw a2, 12(sp) -; RV32-NEXT: beq a3, a1, .LBB52_11 -; RV32-NEXT: .LBB52_10: # %entry -; RV32-NEXT: mv a1, a2 -; RV32-NEXT: .LBB52_11: # %entry +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: or a2, a3, a2 +; RV32-NEXT: seqz a2, a2 +; RV32-NEXT: and a0, a2, a0 +; RV32-NEXT: and a1, a2, a1 +; RV32-NEXT: .LBB52_2: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret @@ -4402,17 +4037,14 @@ ; RV64-NEXT: fmv.x.w a0, fa0 ; RV64-NEXT: call __extendhfsf2@plt ; RV64-NEXT: call __fixunssfti@plt +; RV64-NEXT: li a3, 1 ; RV64-NEXT: mv a2, a0 ; RV64-NEXT: li a0, 0 -; RV64-NEXT: beqz a1, .LBB52_2 +; RV64-NEXT: beq a1, a3, .LBB52_2 ; RV64-NEXT: # %bb.1: # %entry -; RV64-NEXT: mv a2, a0 +; RV64-NEXT: seqz a0, a1 +; RV64-NEXT: and a0, a0, a2 ; RV64-NEXT: .LBB52_2: # %entry -; RV64-NEXT: li a3, 1 -; RV64-NEXT: beq a1, a3, .LBB52_4 -; RV64-NEXT: # %bb.3: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: .LBB52_4: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret @@ -4436,105 +4068,70 @@ ; RV32-NEXT: call __fixsfti@plt ; RV32-NEXT: lw a0, 16(sp) ; RV32-NEXT: lw a2, 20(sp) -; RV32-NEXT: li a1, 1 -; RV32-NEXT: mv a4, a0 -; RV32-NEXT: bgez a2, .LBB53_5 +; RV32-NEXT: li a4, 1 +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: bgez a2, .LBB53_4 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a3, a0 -; RV32-NEXT: bgeu a0, a1, .LBB53_6 +; 
RV32-NEXT: bgeu a0, a4, .LBB53_5 ; RV32-NEXT: .LBB53_2: # %entry -; RV32-NEXT: beqz a2, .LBB53_7 +; RV32-NEXT: beqz a2, .LBB53_6 ; RV32-NEXT: .LBB53_3: # %entry -; RV32-NEXT: slti a1, a2, 0 -; RV32-NEXT: mv a3, a4 -; RV32-NEXT: beqz a1, .LBB53_8 -; RV32-NEXT: .LBB53_4: -; RV32-NEXT: lw a5, 8(sp) -; RV32-NEXT: j .LBB53_9 -; RV32-NEXT: .LBB53_5: # %entry -; RV32-NEXT: li a4, 1 +; RV32-NEXT: slti a5, a2, 0 +; RV32-NEXT: mv a3, a1 +; RV32-NEXT: j .LBB53_7 +; RV32-NEXT: .LBB53_4: # %entry +; RV32-NEXT: li a1, 1 ; RV32-NEXT: mv a3, a0 -; RV32-NEXT: bltu a0, a1, .LBB53_2 -; RV32-NEXT: .LBB53_6: # %entry +; RV32-NEXT: bltu a0, a4, .LBB53_2 +; RV32-NEXT: .LBB53_5: # %entry ; RV32-NEXT: li a3, 1 ; RV32-NEXT: bnez a2, .LBB53_3 -; RV32-NEXT: .LBB53_7: -; RV32-NEXT: seqz a1, a0 -; RV32-NEXT: bnez a1, .LBB53_4 -; RV32-NEXT: .LBB53_8: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: .LBB53_9: # %entry +; RV32-NEXT: .LBB53_6: +; RV32-NEXT: seqz a5, a0 +; RV32-NEXT: .LBB53_7: # %entry ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: or a0, a0, a2 ; RV32-NEXT: li a4, 0 -; RV32-NEXT: beqz a0, .LBB53_11 -; RV32-NEXT: # %bb.10: # %entry -; RV32-NEXT: mv a4, a5 -; RV32-NEXT: .LBB53_11: # %entry -; RV32-NEXT: bnez a1, .LBB53_13 -; RV32-NEXT: # %bb.12: # %entry -; RV32-NEXT: li a5, 0 -; RV32-NEXT: li a1, 0 -; RV32-NEXT: bnez a0, .LBB53_14 -; RV32-NEXT: j .LBB53_15 -; RV32-NEXT: .LBB53_13: -; RV32-NEXT: lw a5, 12(sp) ; RV32-NEXT: li a1, 0 -; RV32-NEXT: beqz a0, .LBB53_15 -; RV32-NEXT: .LBB53_14: # %entry -; RV32-NEXT: mv a1, a5 -; RV32-NEXT: .LBB53_15: # %entry -; RV32-NEXT: bgez a2, .LBB53_20 -; RV32-NEXT: # %bb.16: # %entry -; RV32-NEXT: mv a5, a4 -; RV32-NEXT: beqz a1, .LBB53_21 -; RV32-NEXT: .LBB53_17: # %entry +; RV32-NEXT: bnez a0, .LBB53_12 +; RV32-NEXT: # %bb.8: # %entry +; RV32-NEXT: bgez a2, .LBB53_13 +; RV32-NEXT: .LBB53_9: # %entry ; RV32-NEXT: mv a0, a4 -; RV32-NEXT: bnez a1, .LBB53_22 -; RV32-NEXT: .LBB53_18: # %entry -; RV32-NEXT: beqz a2, .LBB53_23 -; RV32-NEXT: .LBB53_19: # %entry +; RV32-NEXT: bnez a1, .LBB53_14 +; RV32-NEXT: .LBB53_10: # %entry +; RV32-NEXT: beqz a2, .LBB53_15 +; RV32-NEXT: .LBB53_11: # %entry ; RV32-NEXT: sgtz a5, a2 -; RV32-NEXT: beqz a5, .LBB53_24 -; RV32-NEXT: j .LBB53_25 -; RV32-NEXT: .LBB53_20: # %entry +; RV32-NEXT: or a2, a3, a2 +; RV32-NEXT: bnez a2, .LBB53_16 +; RV32-NEXT: j .LBB53_17 +; RV32-NEXT: .LBB53_12: # %entry +; RV32-NEXT: lw a0, 8(sp) +; RV32-NEXT: lw a1, 12(sp) +; RV32-NEXT: and a4, a5, a0 +; RV32-NEXT: and a1, a5, a1 +; RV32-NEXT: bltz a2, .LBB53_9 +; RV32-NEXT: .LBB53_13: # %entry ; RV32-NEXT: li a2, 0 -; RV32-NEXT: mv a5, a4 -; RV32-NEXT: bnez a1, .LBB53_17 -; RV32-NEXT: .LBB53_21: # %entry -; RV32-NEXT: li a5, 0 ; RV32-NEXT: mv a0, a4 -; RV32-NEXT: beqz a1, .LBB53_18 -; RV32-NEXT: .LBB53_22: # %entry -; RV32-NEXT: mv a0, a5 -; RV32-NEXT: bnez a2, .LBB53_19 -; RV32-NEXT: .LBB53_23: +; RV32-NEXT: beqz a1, .LBB53_10 +; RV32-NEXT: .LBB53_14: # %entry +; RV32-NEXT: snez a0, a1 +; RV32-NEXT: and a0, a0, a4 +; RV32-NEXT: bnez a2, .LBB53_11 +; RV32-NEXT: .LBB53_15: ; RV32-NEXT: snez a5, a3 -; RV32-NEXT: bnez a5, .LBB53_25 -; RV32-NEXT: .LBB53_24: # %entry -; RV32-NEXT: li a4, 0 -; RV32-NEXT: .LBB53_25: # %entry ; RV32-NEXT: or a2, a3, a2 -; RV32-NEXT: bnez a2, .LBB53_30 -; RV32-NEXT: # %bb.26: # %entry -; RV32-NEXT: mv a3, a1 -; RV32-NEXT: beqz a5, .LBB53_31 -; RV32-NEXT: .LBB53_27: # %entry -; RV32-NEXT: beqz a2, .LBB53_29 -; RV32-NEXT: .LBB53_28: # %entry -; RV32-NEXT: mv a1, a3 -; RV32-NEXT: .LBB53_29: # %entry +; RV32-NEXT: beqz a2, .LBB53_17 +; RV32-NEXT: .LBB53_16: # 
%entry +; RV32-NEXT: and a0, a5, a4 +; RV32-NEXT: and a1, a5, a1 +; RV32-NEXT: .LBB53_17: # %entry ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 32 ; RV32-NEXT: ret -; RV32-NEXT: .LBB53_30: # %entry -; RV32-NEXT: mv a0, a4 -; RV32-NEXT: mv a3, a1 -; RV32-NEXT: bnez a5, .LBB53_27 -; RV32-NEXT: .LBB53_31: # %entry -; RV32-NEXT: li a3, 0 -; RV32-NEXT: bnez a2, .LBB53_28 -; RV32-NEXT: j .LBB53_29 ; ; RV64-LABEL: ustest_f16i64_mm: ; RV64: # %bb.0: # %entry @@ -4548,34 +4145,28 @@ ; RV64-NEXT: mv a2, a0 ; RV64-NEXT: li a4, 1 ; RV64-NEXT: mv a3, a1 -; RV64-NEXT: bgtz a1, .LBB53_6 +; RV64-NEXT: bgtz a1, .LBB53_5 ; RV64-NEXT: # %bb.1: # %entry ; RV64-NEXT: li a0, 0 -; RV64-NEXT: bne a1, a4, .LBB53_7 +; RV64-NEXT: bne a1, a4, .LBB53_6 ; RV64-NEXT: .LBB53_2: # %entry -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: blez a3, .LBB53_8 +; RV64-NEXT: beqz a3, .LBB53_4 ; RV64-NEXT: .LBB53_3: # %entry -; RV64-NEXT: beqz a3, .LBB53_5 +; RV64-NEXT: sgtz a1, a3 +; RV64-NEXT: and a0, a1, a0 ; RV64-NEXT: .LBB53_4: # %entry -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: .LBB53_5: # %entry ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret -; RV64-NEXT: .LBB53_6: # %entry -; RV64-NEXT: li a2, 0 +; RV64-NEXT: .LBB53_5: # %entry ; RV64-NEXT: li a3, 1 ; RV64-NEXT: li a0, 0 ; RV64-NEXT: beq a1, a4, .LBB53_2 -; RV64-NEXT: .LBB53_7: # %entry -; RV64-NEXT: mv a0, a2 -; RV64-NEXT: mv a1, a0 -; RV64-NEXT: bgtz a3, .LBB53_3 -; RV64-NEXT: .LBB53_8: # %entry -; RV64-NEXT: li a1, 0 -; RV64-NEXT: bnez a3, .LBB53_4 -; RV64-NEXT: j .LBB53_5 +; RV64-NEXT: .LBB53_6: # %entry +; RV64-NEXT: slti a0, a1, 1 +; RV64-NEXT: and a0, a0, a2 +; RV64-NEXT: bnez a3, .LBB53_3 +; RV64-NEXT: j .LBB53_4 entry: %conv = fptosi half %x to i128 %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll --- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll @@ -71,30 +71,22 @@ define <2 x i32> @ustest_f64i32(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fcvt.l.d a1, fa1, rtz -; CHECK-NEXT: li a0, -1 -; CHECK-NEXT: srli a2, a0, 32 ; CHECK-NEXT: fcvt.l.d a0, fa0, rtz -; CHECK-NEXT: bge a1, a2, .LBB2_5 +; CHECK-NEXT: li a1, -1 +; CHECK-NEXT: srli a2, a1, 32 +; CHECK-NEXT: fcvt.l.d a1, fa1, rtz +; CHECK-NEXT: blt a0, a2, .LBB2_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: bge a0, a2, .LBB2_6 +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: .LBB2_2: # %entry -; CHECK-NEXT: blez a0, .LBB2_7 -; CHECK-NEXT: .LBB2_3: # %entry -; CHECK-NEXT: blez a1, .LBB2_8 -; CHECK-NEXT: .LBB2_4: # %entry -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB2_5: # %entry +; CHECK-NEXT: blt a1, a2, .LBB2_4 +; CHECK-NEXT: # %bb.3: # %entry ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: blt a0, a2, .LBB2_2 -; CHECK-NEXT: .LBB2_6: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: bgtz a0, .LBB2_3 -; CHECK-NEXT: .LBB2_7: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: bgtz a1, .LBB2_4 -; CHECK-NEXT: .LBB2_8: # %entry -; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: .LBB2_4: # %entry +; CHECK-NEXT: sgtz a2, a1 +; CHECK-NEXT: sgtz a3, a0 +; CHECK-NEXT: and a0, a3, a0 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i64> @@ -219,59 +211,47 @@ define <4 x i32> @ustest_f32i32(<4 x float> %x) { ; CHECK-LABEL: ustest_f32i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fcvt.l.s a1, fa3, rtz +; CHECK-NEXT: fcvt.l.s a1, 
fa0, rtz ; CHECK-NEXT: li a2, -1 -; CHECK-NEXT: srli a5, a2, 32 -; CHECK-NEXT: fcvt.l.s a2, fa2, rtz -; CHECK-NEXT: bge a1, a5, .LBB5_10 +; CHECK-NEXT: srli a4, a2, 32 +; CHECK-NEXT: fcvt.l.s a2, fa1, rtz +; CHECK-NEXT: bge a1, a4, .LBB5_6 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: fcvt.l.s a3, fa1, rtz -; CHECK-NEXT: bge a2, a5, .LBB5_11 +; CHECK-NEXT: fcvt.l.s a3, fa2, rtz +; CHECK-NEXT: bge a2, a4, .LBB5_7 ; CHECK-NEXT: .LBB5_2: # %entry -; CHECK-NEXT: fcvt.l.s a4, fa0, rtz -; CHECK-NEXT: bge a3, a5, .LBB5_12 +; CHECK-NEXT: fcvt.l.s a5, fa3, rtz +; CHECK-NEXT: bge a3, a4, .LBB5_8 ; CHECK-NEXT: .LBB5_3: # %entry -; CHECK-NEXT: bge a4, a5, .LBB5_13 +; CHECK-NEXT: blt a5, a4, .LBB5_5 ; CHECK-NEXT: .LBB5_4: # %entry -; CHECK-NEXT: blez a4, .LBB5_14 +; CHECK-NEXT: mv a5, a4 ; CHECK-NEXT: .LBB5_5: # %entry -; CHECK-NEXT: blez a3, .LBB5_15 +; CHECK-NEXT: sgtz a4, a5 +; CHECK-NEXT: sgtz a6, a3 +; CHECK-NEXT: sgtz a7, a2 +; CHECK-NEXT: sgtz t0, a1 +; CHECK-NEXT: and a1, t0, a1 +; CHECK-NEXT: and a2, a7, a2 +; CHECK-NEXT: and a3, a6, a3 +; CHECK-NEXT: and a4, a4, a5 +; CHECK-NEXT: sw a4, 12(a0) +; CHECK-NEXT: sw a3, 8(a0) +; CHECK-NEXT: sw a2, 4(a0) +; CHECK-NEXT: sw a1, 0(a0) +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB5_6: # %entry -; CHECK-NEXT: blez a2, .LBB5_16 +; CHECK-NEXT: mv a1, a4 +; CHECK-NEXT: fcvt.l.s a3, fa2, rtz +; CHECK-NEXT: blt a2, a4, .LBB5_2 ; CHECK-NEXT: .LBB5_7: # %entry -; CHECK-NEXT: bgtz a1, .LBB5_9 +; CHECK-NEXT: mv a2, a4 +; CHECK-NEXT: fcvt.l.s a5, fa3, rtz +; CHECK-NEXT: blt a3, a4, .LBB5_3 ; CHECK-NEXT: .LBB5_8: # %entry -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: .LBB5_9: # %entry -; CHECK-NEXT: sw a1, 12(a0) -; CHECK-NEXT: sw a2, 8(a0) -; CHECK-NEXT: sw a3, 4(a0) -; CHECK-NEXT: sw a4, 0(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB5_10: # %entry -; CHECK-NEXT: mv a1, a5 -; CHECK-NEXT: fcvt.l.s a3, fa1, rtz -; CHECK-NEXT: blt a2, a5, .LBB5_2 -; CHECK-NEXT: .LBB5_11: # %entry -; CHECK-NEXT: mv a2, a5 -; CHECK-NEXT: fcvt.l.s a4, fa0, rtz -; CHECK-NEXT: blt a3, a5, .LBB5_3 -; CHECK-NEXT: .LBB5_12: # %entry -; CHECK-NEXT: mv a3, a5 -; CHECK-NEXT: blt a4, a5, .LBB5_4 -; CHECK-NEXT: .LBB5_13: # %entry -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: bgtz a4, .LBB5_5 -; CHECK-NEXT: .LBB5_14: # %entry -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: bgtz a3, .LBB5_6 -; CHECK-NEXT: .LBB5_15: # %entry -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bgtz a2, .LBB5_7 -; CHECK-NEXT: .LBB5_16: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: blez a1, .LBB5_8 -; CHECK-NEXT: j .LBB5_9 +; CHECK-NEXT: mv a3, a4 +; CHECK-NEXT: bge a5, a4, .LBB5_4 +; CHECK-NEXT: j .LBB5_5 entry: %conv = fptosi <4 x float> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, @@ -499,10 +479,10 @@ ; CHECK-NEXT: .cfi_offset fs0, -48 ; CHECK-NEXT: .cfi_offset fs1, -56 ; CHECK-NEXT: .cfi_offset fs2, -64 -; CHECK-NEXT: lhu s1, 24(a1) -; CHECK-NEXT: lhu s2, 0(a1) -; CHECK-NEXT: lhu s3, 8(a1) -; CHECK-NEXT: lhu a1, 16(a1) +; CHECK-NEXT: lhu s1, 0(a1) +; CHECK-NEXT: lhu s2, 24(a1) +; CHECK-NEXT: lhu s3, 16(a1) +; CHECK-NEXT: lhu a1, 8(a1) ; CHECK-NEXT: mv s0, a0 ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: call __extendhfsf2@plt @@ -518,31 +498,31 @@ ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-NEXT: li a1, -1 -; CHECK-NEXT: srli a3, a1, 32 -; CHECK-NEXT: bge a0, a3, .LBB8_10 +; CHECK-NEXT: srli a2, a1, 32 +; CHECK-NEXT: bge a0, a2, .LBB8_6 ; CHECK-NEXT: # %bb.1: # %entry ; CHECK-NEXT: fcvt.l.s a1, fs1, rtz -; CHECK-NEXT: bge s2, a3, .LBB8_11 +; CHECK-NEXT: bge s2, a2, .LBB8_7 ; CHECK-NEXT: .LBB8_2: # %entry -; CHECK-NEXT: 
fcvt.l.s a2, fs0, rtz -; CHECK-NEXT: bge a1, a3, .LBB8_12 +; CHECK-NEXT: fcvt.l.s a3, fs0, rtz +; CHECK-NEXT: bge a1, a2, .LBB8_8 ; CHECK-NEXT: .LBB8_3: # %entry -; CHECK-NEXT: bge a2, a3, .LBB8_13 +; CHECK-NEXT: blt a3, a2, .LBB8_5 ; CHECK-NEXT: .LBB8_4: # %entry -; CHECK-NEXT: blez a2, .LBB8_14 +; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB8_5: # %entry -; CHECK-NEXT: blez a1, .LBB8_15 -; CHECK-NEXT: .LBB8_6: # %entry -; CHECK-NEXT: blez s2, .LBB8_16 -; CHECK-NEXT: .LBB8_7: # %entry -; CHECK-NEXT: bgtz a0, .LBB8_9 -; CHECK-NEXT: .LBB8_8: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB8_9: # %entry -; CHECK-NEXT: sw a0, 12(s0) -; CHECK-NEXT: sw s2, 8(s0) -; CHECK-NEXT: sw a1, 4(s0) -; CHECK-NEXT: sw a2, 0(s0) +; CHECK-NEXT: sgtz a2, a3 +; CHECK-NEXT: sgtz a4, a1 +; CHECK-NEXT: sgtz a5, s2 +; CHECK-NEXT: sgtz a6, a0 +; CHECK-NEXT: and a0, a6, a0 +; CHECK-NEXT: and a5, a5, s2 +; CHECK-NEXT: and a1, a4, a1 +; CHECK-NEXT: and a2, a2, a3 +; CHECK-NEXT: sw a2, 12(s0) +; CHECK-NEXT: sw a1, 8(s0) +; CHECK-NEXT: sw a5, 4(s0) +; CHECK-NEXT: sw a0, 0(s0) ; CHECK-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 48(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 40(sp) # 8-byte Folded Reload @@ -553,30 +533,18 @@ ; CHECK-NEXT: fld fs2, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 64 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB8_10: # %entry -; CHECK-NEXT: mv a0, a3 +; CHECK-NEXT: .LBB8_6: # %entry +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: fcvt.l.s a1, fs1, rtz -; CHECK-NEXT: blt s2, a3, .LBB8_2 -; CHECK-NEXT: .LBB8_11: # %entry -; CHECK-NEXT: mv s2, a3 -; CHECK-NEXT: fcvt.l.s a2, fs0, rtz -; CHECK-NEXT: blt a1, a3, .LBB8_3 -; CHECK-NEXT: .LBB8_12: # %entry -; CHECK-NEXT: mv a1, a3 -; CHECK-NEXT: blt a2, a3, .LBB8_4 -; CHECK-NEXT: .LBB8_13: # %entry -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: bgtz a2, .LBB8_5 -; CHECK-NEXT: .LBB8_14: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bgtz a1, .LBB8_6 -; CHECK-NEXT: .LBB8_15: # %entry -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bgtz s2, .LBB8_7 -; CHECK-NEXT: .LBB8_16: # %entry -; CHECK-NEXT: li s2, 0 -; CHECK-NEXT: blez a0, .LBB8_8 -; CHECK-NEXT: j .LBB8_9 +; CHECK-NEXT: blt s2, a2, .LBB8_2 +; CHECK-NEXT: .LBB8_7: # %entry +; CHECK-NEXT: mv s2, a2 +; CHECK-NEXT: fcvt.l.s a3, fs0, rtz +; CHECK-NEXT: blt a1, a2, .LBB8_3 +; CHECK-NEXT: .LBB8_8: # %entry +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: bge a3, a2, .LBB8_4 +; CHECK-NEXT: j .LBB8_5 entry: %conv = fptosi <4 x half> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, @@ -658,30 +626,22 @@ define <2 x i16> @ustest_f64i16(<2 x double> %x) { ; CHECK-LABEL: ustest_f64i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fcvt.w.d a1, fa1, rtz -; CHECK-NEXT: lui a0, 16 -; CHECK-NEXT: addiw a2, a0, -1 ; CHECK-NEXT: fcvt.w.d a0, fa0, rtz -; CHECK-NEXT: bge a1, a2, .LBB11_5 +; CHECK-NEXT: lui a1, 16 +; CHECK-NEXT: addiw a2, a1, -1 +; CHECK-NEXT: fcvt.w.d a1, fa1, rtz +; CHECK-NEXT: blt a0, a2, .LBB11_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: bge a0, a2, .LBB11_6 +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: .LBB11_2: # %entry -; CHECK-NEXT: blez a0, .LBB11_7 -; CHECK-NEXT: .LBB11_3: # %entry -; CHECK-NEXT: blez a1, .LBB11_8 -; CHECK-NEXT: .LBB11_4: # %entry -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB11_5: # %entry +; CHECK-NEXT: blt a1, a2, .LBB11_4 +; CHECK-NEXT: # %bb.3: # %entry ; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: blt a0, a2, .LBB11_2 -; CHECK-NEXT: .LBB11_6: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: bgtz a0, .LBB11_3 -; CHECK-NEXT: .LBB11_7: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: bgtz a1, .LBB11_4 
-; CHECK-NEXT: .LBB11_8: # %entry -; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: .LBB11_4: # %entry +; CHECK-NEXT: sgtz a2, a1 +; CHECK-NEXT: sgtz a3, a0 +; CHECK-NEXT: and a0, a3, a0 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: ret entry: %conv = fptosi <2 x double> %x to <2 x i32> @@ -808,59 +768,47 @@ define <4 x i16> @ustest_f32i16(<4 x float> %x) { ; CHECK-LABEL: ustest_f32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: fcvt.w.s a1, fa3, rtz +; CHECK-NEXT: fcvt.w.s a1, fa0, rtz ; CHECK-NEXT: lui a2, 16 -; CHECK-NEXT: addiw a5, a2, -1 -; CHECK-NEXT: fcvt.w.s a2, fa2, rtz -; CHECK-NEXT: bge a1, a5, .LBB14_10 +; CHECK-NEXT: addiw a4, a2, -1 +; CHECK-NEXT: fcvt.w.s a2, fa1, rtz +; CHECK-NEXT: bge a1, a4, .LBB14_6 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: fcvt.w.s a3, fa1, rtz -; CHECK-NEXT: bge a2, a5, .LBB14_11 +; CHECK-NEXT: fcvt.w.s a3, fa2, rtz +; CHECK-NEXT: bge a2, a4, .LBB14_7 ; CHECK-NEXT: .LBB14_2: # %entry -; CHECK-NEXT: fcvt.w.s a4, fa0, rtz -; CHECK-NEXT: bge a3, a5, .LBB14_12 +; CHECK-NEXT: fcvt.w.s a5, fa3, rtz +; CHECK-NEXT: bge a3, a4, .LBB14_8 ; CHECK-NEXT: .LBB14_3: # %entry -; CHECK-NEXT: bge a4, a5, .LBB14_13 +; CHECK-NEXT: blt a5, a4, .LBB14_5 ; CHECK-NEXT: .LBB14_4: # %entry -; CHECK-NEXT: blez a4, .LBB14_14 +; CHECK-NEXT: mv a5, a4 ; CHECK-NEXT: .LBB14_5: # %entry -; CHECK-NEXT: blez a3, .LBB14_15 +; CHECK-NEXT: sgtz a4, a5 +; CHECK-NEXT: sgtz a6, a3 +; CHECK-NEXT: sgtz a7, a2 +; CHECK-NEXT: sgtz t0, a1 +; CHECK-NEXT: and a1, t0, a1 +; CHECK-NEXT: and a2, a7, a2 +; CHECK-NEXT: and a3, a6, a3 +; CHECK-NEXT: and a4, a4, a5 +; CHECK-NEXT: sh a4, 6(a0) +; CHECK-NEXT: sh a3, 4(a0) +; CHECK-NEXT: sh a2, 2(a0) +; CHECK-NEXT: sh a1, 0(a0) +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB14_6: # %entry -; CHECK-NEXT: blez a2, .LBB14_16 +; CHECK-NEXT: mv a1, a4 +; CHECK-NEXT: fcvt.w.s a3, fa2, rtz +; CHECK-NEXT: blt a2, a4, .LBB14_2 ; CHECK-NEXT: .LBB14_7: # %entry -; CHECK-NEXT: bgtz a1, .LBB14_9 +; CHECK-NEXT: mv a2, a4 +; CHECK-NEXT: fcvt.w.s a5, fa3, rtz +; CHECK-NEXT: blt a3, a4, .LBB14_3 ; CHECK-NEXT: .LBB14_8: # %entry -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: .LBB14_9: # %entry -; CHECK-NEXT: sh a1, 6(a0) -; CHECK-NEXT: sh a2, 4(a0) -; CHECK-NEXT: sh a3, 2(a0) -; CHECK-NEXT: sh a4, 0(a0) -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB14_10: # %entry -; CHECK-NEXT: mv a1, a5 -; CHECK-NEXT: fcvt.w.s a3, fa1, rtz -; CHECK-NEXT: blt a2, a5, .LBB14_2 -; CHECK-NEXT: .LBB14_11: # %entry -; CHECK-NEXT: mv a2, a5 -; CHECK-NEXT: fcvt.w.s a4, fa0, rtz -; CHECK-NEXT: blt a3, a5, .LBB14_3 -; CHECK-NEXT: .LBB14_12: # %entry -; CHECK-NEXT: mv a3, a5 -; CHECK-NEXT: blt a4, a5, .LBB14_4 -; CHECK-NEXT: .LBB14_13: # %entry -; CHECK-NEXT: mv a4, a5 -; CHECK-NEXT: bgtz a4, .LBB14_5 -; CHECK-NEXT: .LBB14_14: # %entry -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: bgtz a3, .LBB14_6 -; CHECK-NEXT: .LBB14_15: # %entry -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bgtz a2, .LBB14_7 -; CHECK-NEXT: .LBB14_16: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: blez a1, .LBB14_8 -; CHECK-NEXT: j .LBB14_9 +; CHECK-NEXT: mv a3, a4 +; CHECK-NEXT: bge a5, a4, .LBB14_4 +; CHECK-NEXT: j .LBB14_5 entry: %conv = fptosi <4 x float> %x to <4 x i32> %0 = icmp slt <4 x i32> %conv, @@ -1270,14 +1218,14 @@ ; CHECK-NEXT: .cfi_offset fs4, -112 ; CHECK-NEXT: .cfi_offset fs5, -120 ; CHECK-NEXT: .cfi_offset fs6, -128 -; CHECK-NEXT: lhu s1, 56(a1) -; CHECK-NEXT: lhu s2, 0(a1) -; CHECK-NEXT: lhu s3, 8(a1) -; CHECK-NEXT: lhu s4, 16(a1) -; CHECK-NEXT: lhu s5, 24(a1) -; CHECK-NEXT: lhu s6, 32(a1) -; CHECK-NEXT: lhu s7, 40(a1) -; CHECK-NEXT: lhu a1, 48(a1) +; 
CHECK-NEXT: lhu s1, 0(a1) +; CHECK-NEXT: lhu s2, 56(a1) +; CHECK-NEXT: lhu s3, 48(a1) +; CHECK-NEXT: lhu s4, 40(a1) +; CHECK-NEXT: lhu s5, 32(a1) +; CHECK-NEXT: lhu s6, 24(a1) +; CHECK-NEXT: lhu s7, 16(a1) +; CHECK-NEXT: lhu a1, 8(a1) ; CHECK-NEXT: mv s0, a0 ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: call __extendhfsf2@plt @@ -1305,55 +1253,55 @@ ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-NEXT: lui a1, 16 -; CHECK-NEXT: addiw a7, a1, -1 -; CHECK-NEXT: bge a0, a7, .LBB17_18 +; CHECK-NEXT: addiw a2, a1, -1 +; CHECK-NEXT: bge a0, a2, .LBB17_10 ; CHECK-NEXT: # %bb.1: # %entry ; CHECK-NEXT: fcvt.l.s a1, fs5, rtz -; CHECK-NEXT: bge s2, a7, .LBB17_19 +; CHECK-NEXT: bge s2, a2, .LBB17_11 ; CHECK-NEXT: .LBB17_2: # %entry -; CHECK-NEXT: fcvt.l.s a2, fs4, rtz -; CHECK-NEXT: bge a1, a7, .LBB17_20 +; CHECK-NEXT: fcvt.l.s a3, fs4, rtz +; CHECK-NEXT: bge a1, a2, .LBB17_12 ; CHECK-NEXT: .LBB17_3: # %entry -; CHECK-NEXT: fcvt.l.s a3, fs3, rtz -; CHECK-NEXT: bge a2, a7, .LBB17_21 +; CHECK-NEXT: fcvt.l.s a4, fs3, rtz +; CHECK-NEXT: bge a3, a2, .LBB17_13 ; CHECK-NEXT: .LBB17_4: # %entry -; CHECK-NEXT: fcvt.l.s a4, fs2, rtz -; CHECK-NEXT: bge a3, a7, .LBB17_22 +; CHECK-NEXT: fcvt.l.s a5, fs2, rtz +; CHECK-NEXT: bge a4, a2, .LBB17_14 ; CHECK-NEXT: .LBB17_5: # %entry -; CHECK-NEXT: fcvt.l.s a5, fs1, rtz -; CHECK-NEXT: bge a4, a7, .LBB17_23 +; CHECK-NEXT: fcvt.l.s a6, fs1, rtz +; CHECK-NEXT: bge a5, a2, .LBB17_15 ; CHECK-NEXT: .LBB17_6: # %entry -; CHECK-NEXT: fcvt.l.s a6, fs0, rtz -; CHECK-NEXT: bge a5, a7, .LBB17_24 +; CHECK-NEXT: fcvt.l.s a7, fs0, rtz +; CHECK-NEXT: bge a6, a2, .LBB17_16 ; CHECK-NEXT: .LBB17_7: # %entry -; CHECK-NEXT: bge a6, a7, .LBB17_25 +; CHECK-NEXT: blt a7, a2, .LBB17_9 ; CHECK-NEXT: .LBB17_8: # %entry -; CHECK-NEXT: blez a6, .LBB17_26 +; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB17_9: # %entry -; CHECK-NEXT: blez a5, .LBB17_27 -; CHECK-NEXT: .LBB17_10: # %entry -; CHECK-NEXT: blez a4, .LBB17_28 -; CHECK-NEXT: .LBB17_11: # %entry -; CHECK-NEXT: blez a3, .LBB17_29 -; CHECK-NEXT: .LBB17_12: # %entry -; CHECK-NEXT: blez a2, .LBB17_30 -; CHECK-NEXT: .LBB17_13: # %entry -; CHECK-NEXT: blez a1, .LBB17_31 -; CHECK-NEXT: .LBB17_14: # %entry -; CHECK-NEXT: blez s2, .LBB17_32 -; CHECK-NEXT: .LBB17_15: # %entry -; CHECK-NEXT: bgtz a0, .LBB17_17 -; CHECK-NEXT: .LBB17_16: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB17_17: # %entry -; CHECK-NEXT: sh a0, 14(s0) -; CHECK-NEXT: sh s2, 12(s0) -; CHECK-NEXT: sh a1, 10(s0) -; CHECK-NEXT: sh a2, 8(s0) +; CHECK-NEXT: sgtz a2, a7 +; CHECK-NEXT: sgtz t0, a6 +; CHECK-NEXT: sgtz t1, a5 +; CHECK-NEXT: sgtz t2, a4 +; CHECK-NEXT: sgtz t3, a3 +; CHECK-NEXT: sgtz t4, a1 +; CHECK-NEXT: sgtz t5, s2 +; CHECK-NEXT: sgtz t6, a0 +; CHECK-NEXT: and a0, t6, a0 +; CHECK-NEXT: and t5, t5, s2 +; CHECK-NEXT: and a1, t4, a1 +; CHECK-NEXT: and a3, t3, a3 +; CHECK-NEXT: and a4, t2, a4 +; CHECK-NEXT: and a5, t1, a5 +; CHECK-NEXT: and a6, t0, a6 +; CHECK-NEXT: and a2, a2, a7 +; CHECK-NEXT: sh a2, 14(s0) +; CHECK-NEXT: sh a6, 12(s0) +; CHECK-NEXT: sh a5, 10(s0) +; CHECK-NEXT: sh a4, 8(s0) ; CHECK-NEXT: sh a3, 6(s0) -; CHECK-NEXT: sh a4, 4(s0) -; CHECK-NEXT: sh a5, 2(s0) -; CHECK-NEXT: sh a6, 0(s0) +; CHECK-NEXT: sh a1, 4(s0) +; CHECK-NEXT: sh t5, 2(s0) +; CHECK-NEXT: sh a0, 0(s0) ; CHECK-NEXT: ld ra, 120(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 112(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 104(sp) # 8-byte Folded Reload @@ -1372,58 +1320,34 @@ ; CHECK-NEXT: fld fs6, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 128 ; 
CHECK-NEXT: ret -; CHECK-NEXT: .LBB17_18: # %entry -; CHECK-NEXT: mv a0, a7 +; CHECK-NEXT: .LBB17_10: # %entry +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: fcvt.l.s a1, fs5, rtz -; CHECK-NEXT: blt s2, a7, .LBB17_2 -; CHECK-NEXT: .LBB17_19: # %entry -; CHECK-NEXT: mv s2, a7 -; CHECK-NEXT: fcvt.l.s a2, fs4, rtz -; CHECK-NEXT: blt a1, a7, .LBB17_3 -; CHECK-NEXT: .LBB17_20: # %entry -; CHECK-NEXT: mv a1, a7 -; CHECK-NEXT: fcvt.l.s a3, fs3, rtz -; CHECK-NEXT: blt a2, a7, .LBB17_4 -; CHECK-NEXT: .LBB17_21: # %entry -; CHECK-NEXT: mv a2, a7 -; CHECK-NEXT: fcvt.l.s a4, fs2, rtz -; CHECK-NEXT: blt a3, a7, .LBB17_5 -; CHECK-NEXT: .LBB17_22: # %entry -; CHECK-NEXT: mv a3, a7 -; CHECK-NEXT: fcvt.l.s a5, fs1, rtz -; CHECK-NEXT: blt a4, a7, .LBB17_6 -; CHECK-NEXT: .LBB17_23: # %entry -; CHECK-NEXT: mv a4, a7 -; CHECK-NEXT: fcvt.l.s a6, fs0, rtz -; CHECK-NEXT: blt a5, a7, .LBB17_7 -; CHECK-NEXT: .LBB17_24: # %entry -; CHECK-NEXT: mv a5, a7 -; CHECK-NEXT: blt a6, a7, .LBB17_8 -; CHECK-NEXT: .LBB17_25: # %entry -; CHECK-NEXT: mv a6, a7 -; CHECK-NEXT: bgtz a6, .LBB17_9 -; CHECK-NEXT: .LBB17_26: # %entry -; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: bgtz a5, .LBB17_10 -; CHECK-NEXT: .LBB17_27: # %entry -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: bgtz a4, .LBB17_11 -; CHECK-NEXT: .LBB17_28: # %entry -; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: bgtz a3, .LBB17_12 -; CHECK-NEXT: .LBB17_29: # %entry -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: bgtz a2, .LBB17_13 -; CHECK-NEXT: .LBB17_30: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bgtz a1, .LBB17_14 -; CHECK-NEXT: .LBB17_31: # %entry -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bgtz s2, .LBB17_15 -; CHECK-NEXT: .LBB17_32: # %entry -; CHECK-NEXT: li s2, 0 -; CHECK-NEXT: blez a0, .LBB17_16 -; CHECK-NEXT: j .LBB17_17 +; CHECK-NEXT: blt s2, a2, .LBB17_2 +; CHECK-NEXT: .LBB17_11: # %entry +; CHECK-NEXT: mv s2, a2 +; CHECK-NEXT: fcvt.l.s a3, fs4, rtz +; CHECK-NEXT: blt a1, a2, .LBB17_3 +; CHECK-NEXT: .LBB17_12: # %entry +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: fcvt.l.s a4, fs3, rtz +; CHECK-NEXT: blt a3, a2, .LBB17_4 +; CHECK-NEXT: .LBB17_13: # %entry +; CHECK-NEXT: mv a3, a2 +; CHECK-NEXT: fcvt.l.s a5, fs2, rtz +; CHECK-NEXT: blt a4, a2, .LBB17_5 +; CHECK-NEXT: .LBB17_14: # %entry +; CHECK-NEXT: mv a4, a2 +; CHECK-NEXT: fcvt.l.s a6, fs1, rtz +; CHECK-NEXT: blt a5, a2, .LBB17_6 +; CHECK-NEXT: .LBB17_15: # %entry +; CHECK-NEXT: mv a5, a2 +; CHECK-NEXT: fcvt.l.s a7, fs0, rtz +; CHECK-NEXT: blt a6, a2, .LBB17_7 +; CHECK-NEXT: .LBB17_16: # %entry +; CHECK-NEXT: mv a6, a2 +; CHECK-NEXT: bge a7, a2, .LBB17_8 +; CHECK-NEXT: j .LBB17_9 entry: %conv = fptosi <8 x half> %x to <8 x i32> %0 = icmp slt <8 x i32> %conv, @@ -1473,37 +1397,35 @@ ; CHECK-NEXT: slti a5, s1, 0 ; CHECK-NEXT: bnez a5, .LBB18_6 ; CHECK-NEXT: .LBB18_5: # %entry -; CHECK-NEXT: li s1, 0 ; CHECK-NEXT: mv s0, a3 ; CHECK-NEXT: .LBB18_6: # %entry -; CHECK-NEXT: beqz a4, .LBB18_10 +; CHECK-NEXT: bnez a4, .LBB18_8 ; CHECK-NEXT: # %bb.7: # %entry -; CHECK-NEXT: slli a3, a0, 63 -; CHECK-NEXT: beq a1, a0, .LBB18_11 +; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB18_8: # %entry -; CHECK-NEXT: slt a1, a0, a1 -; CHECK-NEXT: bne s1, a0, .LBB18_12 -; CHECK-NEXT: .LBB18_9: -; CHECK-NEXT: sltu a0, a3, s0 +; CHECK-NEXT: and a5, a5, s1 +; CHECK-NEXT: and a3, a4, a1 +; CHECK-NEXT: slli a1, a0, 63 +; CHECK-NEXT: beq a3, a0, .LBB18_11 +; CHECK-NEXT: # %bb.9: # %entry +; CHECK-NEXT: slt a3, a0, a3 +; CHECK-NEXT: bne a5, a0, .LBB18_12 +; CHECK-NEXT: .LBB18_10: +; CHECK-NEXT: sltu a0, a1, s0 ; CHECK-NEXT: beqz a0, .LBB18_13 ; CHECK-NEXT: j .LBB18_14 -; CHECK-NEXT: 
.LBB18_10: # %entry -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: slli a3, a0, 63 -; CHECK-NEXT: bne a1, a0, .LBB18_8 ; CHECK-NEXT: .LBB18_11: -; CHECK-NEXT: sltu a1, a3, a2 -; CHECK-NEXT: beq s1, a0, .LBB18_9 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: beq a5, a0, .LBB18_10 ; CHECK-NEXT: .LBB18_12: # %entry -; CHECK-NEXT: slt a0, a0, s1 +; CHECK-NEXT: slt a0, a0, a5 ; CHECK-NEXT: bnez a0, .LBB18_14 ; CHECK-NEXT: .LBB18_13: # %entry -; CHECK-NEXT: mv s0, a3 +; CHECK-NEXT: mv s0, a1 ; CHECK-NEXT: .LBB18_14: # %entry -; CHECK-NEXT: bnez a1, .LBB18_16 +; CHECK-NEXT: bnez a3, .LBB18_16 ; CHECK-NEXT: # %bb.15: # %entry -; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB18_16: # %entry ; CHECK-NEXT: mv a0, s0 ; CHECK-NEXT: mv a1, a2 @@ -1536,22 +1458,17 @@ ; CHECK-NEXT: .cfi_offset s0, -16 ; CHECK-NEXT: .cfi_offset s1, -24 ; CHECK-NEXT: .cfi_offset fs0, -32 -; CHECK-NEXT: fmv.d fs0, fa0 -; CHECK-NEXT: fmv.d fa0, fa1 +; CHECK-NEXT: fmv.d fs0, fa1 ; CHECK-NEXT: call __fixunsdfti@plt ; CHECK-NEXT: mv s0, a0 ; CHECK-NEXT: mv s1, a1 ; CHECK-NEXT: fmv.d fa0, fs0 ; CHECK-NEXT: call __fixunsdfti@plt -; CHECK-NEXT: beqz a1, .LBB19_2 -; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB19_2: # %entry -; CHECK-NEXT: beqz s1, .LBB19_4 -; CHECK-NEXT: # %bb.3: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: .LBB19_4: # %entry -; CHECK-NEXT: mv a1, s0 +; CHECK-NEXT: seqz a1, a1 +; CHECK-NEXT: seqz a2, s1 +; CHECK-NEXT: and a2, a2, s0 +; CHECK-NEXT: and a1, a1, a0 +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -1579,58 +1496,37 @@ ; CHECK-NEXT: .cfi_offset s0, -16 ; CHECK-NEXT: .cfi_offset s1, -24 ; CHECK-NEXT: .cfi_offset fs0, -32 -; CHECK-NEXT: fmv.d fs0, fa0 -; CHECK-NEXT: fmv.d fa0, fa1 +; CHECK-NEXT: fmv.d fs0, fa1 ; CHECK-NEXT: call __fixdfti@plt ; CHECK-NEXT: mv s0, a0 ; CHECK-NEXT: mv s1, a1 ; CHECK-NEXT: fmv.d fa0, fs0 ; CHECK-NEXT: call __fixdfti@plt -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: bgtz a1, .LBB20_7 +; CHECK-NEXT: mv a2, s1 +; CHECK-NEXT: blez s1, .LBB20_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a3, s1 -; CHECK-NEXT: bgtz s1, .LBB20_8 +; CHECK-NEXT: li a2, 1 ; CHECK-NEXT: .LBB20_2: # %entry -; CHECK-NEXT: bgtz a1, .LBB20_9 -; CHECK-NEXT: .LBB20_3: # %entry -; CHECK-NEXT: bgtz s1, .LBB20_10 +; CHECK-NEXT: slti a3, a1, 1 +; CHECK-NEXT: blez a1, .LBB20_4 +; CHECK-NEXT: # %bb.3: # %entry +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: .LBB20_4: # %entry -; CHECK-NEXT: beqz a3, .LBB20_11 -; CHECK-NEXT: .LBB20_5: # %entry -; CHECK-NEXT: sgtz a1, a3 -; CHECK-NEXT: bnez a2, .LBB20_12 -; CHECK-NEXT: .LBB20_6: -; CHECK-NEXT: snez a2, a0 -; CHECK-NEXT: beqz a2, .LBB20_13 -; CHECK-NEXT: j .LBB20_14 -; CHECK-NEXT: .LBB20_7: # %entry -; CHECK-NEXT: li a2, 1 -; CHECK-NEXT: mv a3, s1 -; CHECK-NEXT: blez s1, .LBB20_2 +; CHECK-NEXT: and a3, a3, a0 +; CHECK-NEXT: slti a0, s1, 1 +; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: beqz a1, .LBB20_6 +; CHECK-NEXT: # %bb.5: # %entry +; CHECK-NEXT: sgtz a4, a1 +; CHECK-NEXT: .LBB20_6: # %entry +; CHECK-NEXT: and a0, a0, s0 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: beqz a2, .LBB20_8 +; CHECK-NEXT: # %bb.7: # %entry +; CHECK-NEXT: sgtz a1, a2 ; CHECK-NEXT: .LBB20_8: # %entry -; CHECK-NEXT: li a3, 1 -; CHECK-NEXT: blez a1, .LBB20_3 -; CHECK-NEXT: .LBB20_9: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: blez s1, .LBB20_4 -; CHECK-NEXT: .LBB20_10: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: 
bnez a3, .LBB20_5 -; CHECK-NEXT: .LBB20_11: -; CHECK-NEXT: snez a1, s0 -; CHECK-NEXT: beqz a2, .LBB20_6 -; CHECK-NEXT: .LBB20_12: # %entry -; CHECK-NEXT: sgtz a2, a2 -; CHECK-NEXT: bnez a2, .LBB20_14 -; CHECK-NEXT: .LBB20_13: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB20_14: # %entry -; CHECK-NEXT: bnez a1, .LBB20_16 -; CHECK-NEXT: # %bb.15: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: .LBB20_16: # %entry -; CHECK-NEXT: mv a1, s0 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: and a1, a4, a3 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -1684,37 +1580,35 @@ ; CHECK-NEXT: slti a5, s1, 0 ; CHECK-NEXT: bnez a5, .LBB21_6 ; CHECK-NEXT: .LBB21_5: # %entry -; CHECK-NEXT: li s1, 0 ; CHECK-NEXT: mv s0, a3 ; CHECK-NEXT: .LBB21_6: # %entry -; CHECK-NEXT: beqz a4, .LBB21_10 +; CHECK-NEXT: bnez a4, .LBB21_8 ; CHECK-NEXT: # %bb.7: # %entry -; CHECK-NEXT: slli a3, a0, 63 -; CHECK-NEXT: beq a1, a0, .LBB21_11 +; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB21_8: # %entry -; CHECK-NEXT: slt a1, a0, a1 -; CHECK-NEXT: bne s1, a0, .LBB21_12 -; CHECK-NEXT: .LBB21_9: -; CHECK-NEXT: sltu a0, a3, s0 +; CHECK-NEXT: and a5, a5, s1 +; CHECK-NEXT: and a3, a4, a1 +; CHECK-NEXT: slli a1, a0, 63 +; CHECK-NEXT: beq a3, a0, .LBB21_11 +; CHECK-NEXT: # %bb.9: # %entry +; CHECK-NEXT: slt a3, a0, a3 +; CHECK-NEXT: bne a5, a0, .LBB21_12 +; CHECK-NEXT: .LBB21_10: +; CHECK-NEXT: sltu a0, a1, s0 ; CHECK-NEXT: beqz a0, .LBB21_13 ; CHECK-NEXT: j .LBB21_14 -; CHECK-NEXT: .LBB21_10: # %entry -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: slli a3, a0, 63 -; CHECK-NEXT: bne a1, a0, .LBB21_8 ; CHECK-NEXT: .LBB21_11: -; CHECK-NEXT: sltu a1, a3, a2 -; CHECK-NEXT: beq s1, a0, .LBB21_9 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: beq a5, a0, .LBB21_10 ; CHECK-NEXT: .LBB21_12: # %entry -; CHECK-NEXT: slt a0, a0, s1 +; CHECK-NEXT: slt a0, a0, a5 ; CHECK-NEXT: bnez a0, .LBB21_14 ; CHECK-NEXT: .LBB21_13: # %entry -; CHECK-NEXT: mv s0, a3 +; CHECK-NEXT: mv s0, a1 ; CHECK-NEXT: .LBB21_14: # %entry -; CHECK-NEXT: bnez a1, .LBB21_16 +; CHECK-NEXT: bnez a3, .LBB21_16 ; CHECK-NEXT: # %bb.15: # %entry -; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB21_16: # %entry ; CHECK-NEXT: mv a0, s0 ; CHECK-NEXT: mv a1, a2 @@ -1747,22 +1641,17 @@ ; CHECK-NEXT: .cfi_offset s0, -16 ; CHECK-NEXT: .cfi_offset s1, -24 ; CHECK-NEXT: .cfi_offset fs0, -32 -; CHECK-NEXT: fmv.s fs0, fa0 -; CHECK-NEXT: fmv.s fa0, fa1 +; CHECK-NEXT: fmv.s fs0, fa1 ; CHECK-NEXT: call __fixunssfti@plt ; CHECK-NEXT: mv s0, a0 ; CHECK-NEXT: mv s1, a1 ; CHECK-NEXT: fmv.s fa0, fs0 ; CHECK-NEXT: call __fixunssfti@plt -; CHECK-NEXT: beqz a1, .LBB22_2 -; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB22_2: # %entry -; CHECK-NEXT: beqz s1, .LBB22_4 -; CHECK-NEXT: # %bb.3: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: .LBB22_4: # %entry -; CHECK-NEXT: mv a1, s0 +; CHECK-NEXT: seqz a1, a1 +; CHECK-NEXT: seqz a2, s1 +; CHECK-NEXT: and a2, a2, s0 +; CHECK-NEXT: and a1, a1, a0 +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -1790,58 +1679,37 @@ ; CHECK-NEXT: .cfi_offset s0, -16 ; CHECK-NEXT: .cfi_offset s1, -24 ; CHECK-NEXT: .cfi_offset fs0, -32 -; CHECK-NEXT: fmv.s fs0, fa0 -; CHECK-NEXT: fmv.s fa0, fa1 +; CHECK-NEXT: fmv.s fs0, fa1 ; CHECK-NEXT: call __fixsfti@plt ; CHECK-NEXT: mv s0, a0 ; 
CHECK-NEXT: mv s1, a1 ; CHECK-NEXT: fmv.s fa0, fs0 ; CHECK-NEXT: call __fixsfti@plt -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: bgtz a1, .LBB23_7 +; CHECK-NEXT: mv a2, s1 +; CHECK-NEXT: blez s1, .LBB23_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a3, s1 -; CHECK-NEXT: bgtz s1, .LBB23_8 +; CHECK-NEXT: li a2, 1 ; CHECK-NEXT: .LBB23_2: # %entry -; CHECK-NEXT: bgtz a1, .LBB23_9 -; CHECK-NEXT: .LBB23_3: # %entry -; CHECK-NEXT: bgtz s1, .LBB23_10 +; CHECK-NEXT: slti a3, a1, 1 +; CHECK-NEXT: blez a1, .LBB23_4 +; CHECK-NEXT: # %bb.3: # %entry +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: .LBB23_4: # %entry -; CHECK-NEXT: beqz a3, .LBB23_11 -; CHECK-NEXT: .LBB23_5: # %entry -; CHECK-NEXT: sgtz a1, a3 -; CHECK-NEXT: bnez a2, .LBB23_12 -; CHECK-NEXT: .LBB23_6: -; CHECK-NEXT: snez a2, a0 -; CHECK-NEXT: beqz a2, .LBB23_13 -; CHECK-NEXT: j .LBB23_14 -; CHECK-NEXT: .LBB23_7: # %entry -; CHECK-NEXT: li a2, 1 -; CHECK-NEXT: mv a3, s1 -; CHECK-NEXT: blez s1, .LBB23_2 +; CHECK-NEXT: and a3, a3, a0 +; CHECK-NEXT: slti a0, s1, 1 +; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: beqz a1, .LBB23_6 +; CHECK-NEXT: # %bb.5: # %entry +; CHECK-NEXT: sgtz a4, a1 +; CHECK-NEXT: .LBB23_6: # %entry +; CHECK-NEXT: and a0, a0, s0 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: beqz a2, .LBB23_8 +; CHECK-NEXT: # %bb.7: # %entry +; CHECK-NEXT: sgtz a1, a2 ; CHECK-NEXT: .LBB23_8: # %entry -; CHECK-NEXT: li a3, 1 -; CHECK-NEXT: blez a1, .LBB23_3 -; CHECK-NEXT: .LBB23_9: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: blez s1, .LBB23_4 -; CHECK-NEXT: .LBB23_10: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: bnez a3, .LBB23_5 -; CHECK-NEXT: .LBB23_11: -; CHECK-NEXT: snez a1, s0 -; CHECK-NEXT: beqz a2, .LBB23_6 -; CHECK-NEXT: .LBB23_12: # %entry -; CHECK-NEXT: sgtz a2, a2 -; CHECK-NEXT: bnez a2, .LBB23_14 -; CHECK-NEXT: .LBB23_13: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB23_14: # %entry -; CHECK-NEXT: bnez a1, .LBB23_16 -; CHECK-NEXT: # %bb.15: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: .LBB23_16: # %entry -; CHECK-NEXT: mv a1, s0 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: and a1, a4, a3 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -1897,37 +1765,35 @@ ; CHECK-NEXT: slti a5, s1, 0 ; CHECK-NEXT: bnez a5, .LBB24_6 ; CHECK-NEXT: .LBB24_5: # %entry -; CHECK-NEXT: li s1, 0 ; CHECK-NEXT: mv s0, a3 ; CHECK-NEXT: .LBB24_6: # %entry -; CHECK-NEXT: beqz a4, .LBB24_10 +; CHECK-NEXT: bnez a4, .LBB24_8 ; CHECK-NEXT: # %bb.7: # %entry -; CHECK-NEXT: slli a3, a0, 63 -; CHECK-NEXT: beq a1, a0, .LBB24_11 +; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB24_8: # %entry -; CHECK-NEXT: slt a1, a0, a1 -; CHECK-NEXT: bne s1, a0, .LBB24_12 -; CHECK-NEXT: .LBB24_9: -; CHECK-NEXT: sltu a0, a3, s0 +; CHECK-NEXT: and a5, a5, s1 +; CHECK-NEXT: and a3, a4, a1 +; CHECK-NEXT: slli a1, a0, 63 +; CHECK-NEXT: beq a3, a0, .LBB24_11 +; CHECK-NEXT: # %bb.9: # %entry +; CHECK-NEXT: slt a3, a0, a3 +; CHECK-NEXT: bne a5, a0, .LBB24_12 +; CHECK-NEXT: .LBB24_10: +; CHECK-NEXT: sltu a0, a1, s0 ; CHECK-NEXT: beqz a0, .LBB24_13 ; CHECK-NEXT: j .LBB24_14 -; CHECK-NEXT: .LBB24_10: # %entry -; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: mv a2, a3 -; CHECK-NEXT: slli a3, a0, 63 -; CHECK-NEXT: bne a1, a0, .LBB24_8 ; CHECK-NEXT: .LBB24_11: -; CHECK-NEXT: sltu a1, a3, a2 -; CHECK-NEXT: beq s1, a0, .LBB24_9 +; CHECK-NEXT: sltu a3, a1, a2 +; CHECK-NEXT: beq a5, a0, .LBB24_10 ; CHECK-NEXT: .LBB24_12: # %entry -; CHECK-NEXT: slt a0, a0, s1 +; CHECK-NEXT: slt a0, a0, a5 ; CHECK-NEXT: bnez a0, 
.LBB24_14 ; CHECK-NEXT: .LBB24_13: # %entry -; CHECK-NEXT: mv s0, a3 +; CHECK-NEXT: mv s0, a1 ; CHECK-NEXT: .LBB24_14: # %entry -; CHECK-NEXT: bnez a1, .LBB24_16 +; CHECK-NEXT: bnez a3, .LBB24_16 ; CHECK-NEXT: # %bb.15: # %entry -; CHECK-NEXT: mv a2, a3 +; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB24_16: # %entry ; CHECK-NEXT: mv a0, s0 ; CHECK-NEXT: mv a1, a2 @@ -1960,24 +1826,19 @@ ; CHECK-NEXT: .cfi_offset s0, -16 ; CHECK-NEXT: .cfi_offset s1, -24 ; CHECK-NEXT: .cfi_offset s2, -32 -; CHECK-NEXT: mv s2, a0 -; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: mv s0, a1 ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: call __fixunssfti@plt -; CHECK-NEXT: mv s0, a0 -; CHECK-NEXT: mv s1, a1 -; CHECK-NEXT: mv a0, s2 +; CHECK-NEXT: mv s1, a0 +; CHECK-NEXT: mv s2, a1 +; CHECK-NEXT: mv a0, s0 ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: call __fixunssfti@plt -; CHECK-NEXT: beqz a1, .LBB25_2 -; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB25_2: # %entry -; CHECK-NEXT: beqz s1, .LBB25_4 -; CHECK-NEXT: # %bb.3: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: .LBB25_4: # %entry -; CHECK-NEXT: mv a1, s0 +; CHECK-NEXT: seqz a1, a1 +; CHECK-NEXT: seqz a2, s2 +; CHECK-NEXT: and a2, a2, s1 +; CHECK-NEXT: and a1, a1, a0 +; CHECK-NEXT: mv a0, a2 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -2005,8 +1866,7 @@ ; CHECK-NEXT: .cfi_offset s0, -16 ; CHECK-NEXT: .cfi_offset s1, -24 ; CHECK-NEXT: .cfi_offset s2, -32 -; CHECK-NEXT: mv s2, a0 -; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: mv s2, a1 ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: call __fixsfti@plt ; CHECK-NEXT: mv s0, a0 @@ -2014,51 +1874,31 @@ ; CHECK-NEXT: mv a0, s2 ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: call __fixsfti@plt -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: bgtz a1, .LBB26_7 +; CHECK-NEXT: mv a2, s1 +; CHECK-NEXT: blez s1, .LBB26_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a3, s1 -; CHECK-NEXT: bgtz s1, .LBB26_8 +; CHECK-NEXT: li a2, 1 ; CHECK-NEXT: .LBB26_2: # %entry -; CHECK-NEXT: bgtz a1, .LBB26_9 -; CHECK-NEXT: .LBB26_3: # %entry -; CHECK-NEXT: bgtz s1, .LBB26_10 +; CHECK-NEXT: slti a3, a1, 1 +; CHECK-NEXT: blez a1, .LBB26_4 +; CHECK-NEXT: # %bb.3: # %entry +; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: .LBB26_4: # %entry -; CHECK-NEXT: beqz a3, .LBB26_11 -; CHECK-NEXT: .LBB26_5: # %entry -; CHECK-NEXT: sgtz a1, a3 -; CHECK-NEXT: bnez a2, .LBB26_12 -; CHECK-NEXT: .LBB26_6: -; CHECK-NEXT: snez a2, a0 -; CHECK-NEXT: beqz a2, .LBB26_13 -; CHECK-NEXT: j .LBB26_14 -; CHECK-NEXT: .LBB26_7: # %entry -; CHECK-NEXT: li a2, 1 -; CHECK-NEXT: mv a3, s1 -; CHECK-NEXT: blez s1, .LBB26_2 +; CHECK-NEXT: and a3, a3, a0 +; CHECK-NEXT: slti a0, s1, 1 +; CHECK-NEXT: mv a4, a3 +; CHECK-NEXT: beqz a1, .LBB26_6 +; CHECK-NEXT: # %bb.5: # %entry +; CHECK-NEXT: sgtz a4, a1 +; CHECK-NEXT: .LBB26_6: # %entry +; CHECK-NEXT: and a0, a0, s0 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: beqz a2, .LBB26_8 +; CHECK-NEXT: # %bb.7: # %entry +; CHECK-NEXT: sgtz a1, a2 ; CHECK-NEXT: .LBB26_8: # %entry -; CHECK-NEXT: li a3, 1 -; CHECK-NEXT: blez a1, .LBB26_3 -; CHECK-NEXT: .LBB26_9: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: blez s1, .LBB26_4 -; CHECK-NEXT: .LBB26_10: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: bnez a3, .LBB26_5 -; CHECK-NEXT: .LBB26_11: -; CHECK-NEXT: snez a1, s0 -; CHECK-NEXT: beqz a2, .LBB26_6 -; CHECK-NEXT: .LBB26_12: # %entry -; CHECK-NEXT: sgtz a2, a2 -; CHECK-NEXT: bnez a2, .LBB26_14 -; CHECK-NEXT: .LBB26_13: # 
%entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB26_14: # %entry -; CHECK-NEXT: bnez a1, .LBB26_16 -; CHECK-NEXT: # %bb.15: # %entry -; CHECK-NEXT: li s0, 0 -; CHECK-NEXT: .LBB26_16: # %entry -; CHECK-NEXT: mv a1, s0 +; CHECK-NEXT: and a0, a1, a0 +; CHECK-NEXT: and a1, a4, a3 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload @@ -3614,36 +3454,27 @@ ; CHECK-NEXT: mv s1, a1 ; CHECK-NEXT: fmv.d fa0, fs0 ; CHECK-NEXT: call __fixunsdfti@plt +; CHECK-NEXT: li a4, 1 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: beqz a3, .LBB46_2 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: beq a3, a4, .LBB46_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: seqz a0, a3 +; CHECK-NEXT: and a0, a0, a2 ; CHECK-NEXT: .LBB46_2: # %entry -; CHECK-NEXT: li a4, 1 -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: bne a3, a4, .LBB46_7 +; CHECK-NEXT: beq s1, a4, .LBB46_4 ; CHECK-NEXT: # %bb.3: # %entry -; CHECK-NEXT: bne s1, a1, .LBB46_8 +; CHECK-NEXT: seqz a1, s1 +; CHECK-NEXT: and a1, a1, s0 ; CHECK-NEXT: .LBB46_4: # %entry -; CHECK-NEXT: beq s1, a4, .LBB46_6 -; CHECK-NEXT: .LBB46_5: # %entry -; CHECK-NEXT: mv a1, s0 -; CHECK-NEXT: .LBB46_6: # %entry ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 32 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB46_7: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: beq s1, a1, .LBB46_4 -; CHECK-NEXT: .LBB46_8: # %entry -; CHECK-NEXT: mv s0, a1 -; CHECK-NEXT: bne s1, a4, .LBB46_5 -; CHECK-NEXT: j .LBB46_6 entry: %conv = fptoui <2 x double> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -3671,74 +3502,54 @@ ; CHECK-NEXT: fmv.d fa0, fs0 ; CHECK-NEXT: call __fixdfti@plt ; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: li a5, 1 +; CHECK-NEXT: li a4, 1 ; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: bgtz a1, .LBB47_12 +; CHECK-NEXT: bgtz a1, .LBB47_8 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a4, s1 -; CHECK-NEXT: bgtz s1, .LBB47_13 +; CHECK-NEXT: mv a5, s1 +; CHECK-NEXT: bgtz s1, .LBB47_9 ; CHECK-NEXT: .LBB47_2: # %entry -; CHECK-NEXT: bgtz a2, .LBB47_14 -; CHECK-NEXT: .LBB47_3: # %entry ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bne a2, a5, .LBB47_15 +; CHECK-NEXT: bne a2, a4, .LBB47_10 +; CHECK-NEXT: .LBB47_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: bne s1, a4, .LBB47_11 ; CHECK-NEXT: .LBB47_4: # %entry -; CHECK-NEXT: bgtz s1, .LBB47_16 +; CHECK-NEXT: bnez a5, .LBB47_12 ; CHECK-NEXT: .LBB47_5: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: bne s1, a5, .LBB47_17 +; CHECK-NEXT: beqz a3, .LBB47_7 ; CHECK-NEXT: .LBB47_6: # %entry -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: blez a4, .LBB47_18 +; CHECK-NEXT: sgtz a2, a3 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: .LBB47_7: # %entry -; CHECK-NEXT: bnez a4, .LBB47_19 -; CHECK-NEXT: .LBB47_8: # %entry -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: blez a3, .LBB47_20 -; CHECK-NEXT: .LBB47_9: # %entry -; CHECK-NEXT: beqz a3, .LBB47_11 -; CHECK-NEXT: .LBB47_10: # %entry -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB47_11: # %entry ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 32 ; CHECK-NEXT: ret -; CHECK-NEXT: 
.LBB47_12: # %entry +; CHECK-NEXT: .LBB47_8: # %entry ; CHECK-NEXT: li a3, 1 -; CHECK-NEXT: mv a4, s1 +; CHECK-NEXT: mv a5, s1 ; CHECK-NEXT: blez s1, .LBB47_2 -; CHECK-NEXT: .LBB47_13: # %entry -; CHECK-NEXT: li a4, 1 -; CHECK-NEXT: blez a2, .LBB47_3 -; CHECK-NEXT: .LBB47_14: # %entry -; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: .LBB47_9: # %entry +; CHECK-NEXT: li a5, 1 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: beq a2, a5, .LBB47_4 -; CHECK-NEXT: .LBB47_15: # %entry -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: blez s1, .LBB47_5 -; CHECK-NEXT: .LBB47_16: # %entry -; CHECK-NEXT: li s0, 0 +; CHECK-NEXT: beq a2, a4, .LBB47_3 +; CHECK-NEXT: .LBB47_10: # %entry +; CHECK-NEXT: slti a1, a2, 1 +; CHECK-NEXT: and a1, a1, a0 ; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: beq s1, a5, .LBB47_6 -; CHECK-NEXT: .LBB47_17: # %entry -; CHECK-NEXT: mv a0, s0 -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bgtz a4, .LBB47_7 -; CHECK-NEXT: .LBB47_18: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: beqz a4, .LBB47_8 -; CHECK-NEXT: .LBB47_19: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: bgtz a3, .LBB47_9 -; CHECK-NEXT: .LBB47_20: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bnez a3, .LBB47_10 -; CHECK-NEXT: j .LBB47_11 +; CHECK-NEXT: beq s1, a4, .LBB47_4 +; CHECK-NEXT: .LBB47_11: # %entry +; CHECK-NEXT: slti a0, s1, 1 +; CHECK-NEXT: and a0, a0, s0 +; CHECK-NEXT: beqz a5, .LBB47_5 +; CHECK-NEXT: .LBB47_12: # %entry +; CHECK-NEXT: sgtz a2, a5 +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: bnez a3, .LBB47_6 +; CHECK-NEXT: j .LBB47_7 entry: %conv = fptosi <2 x double> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -3882,36 +3693,27 @@ ; CHECK-NEXT: mv s1, a1 ; CHECK-NEXT: fmv.s fa0, fs0 ; CHECK-NEXT: call __fixunssfti@plt +; CHECK-NEXT: li a4, 1 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: beqz a3, .LBB49_2 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: beq a3, a4, .LBB49_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: seqz a0, a3 +; CHECK-NEXT: and a0, a0, a2 ; CHECK-NEXT: .LBB49_2: # %entry -; CHECK-NEXT: li a4, 1 -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: bne a3, a4, .LBB49_7 +; CHECK-NEXT: beq s1, a4, .LBB49_4 ; CHECK-NEXT: # %bb.3: # %entry -; CHECK-NEXT: bne s1, a1, .LBB49_8 +; CHECK-NEXT: seqz a1, s1 +; CHECK-NEXT: and a1, a1, s0 ; CHECK-NEXT: .LBB49_4: # %entry -; CHECK-NEXT: beq s1, a4, .LBB49_6 -; CHECK-NEXT: .LBB49_5: # %entry -; CHECK-NEXT: mv a1, s0 -; CHECK-NEXT: .LBB49_6: # %entry ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 32 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB49_7: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: beq s1, a1, .LBB49_4 -; CHECK-NEXT: .LBB49_8: # %entry -; CHECK-NEXT: mv s0, a1 -; CHECK-NEXT: bne s1, a4, .LBB49_5 -; CHECK-NEXT: j .LBB49_6 entry: %conv = fptoui <2 x float> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -3939,74 +3741,54 @@ ; CHECK-NEXT: fmv.s fa0, fs0 ; CHECK-NEXT: call __fixsfti@plt ; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: li a5, 1 +; CHECK-NEXT: li a4, 1 ; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: bgtz a1, .LBB50_12 +; CHECK-NEXT: bgtz a1, .LBB50_8 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a4, s1 -; CHECK-NEXT: bgtz s1, .LBB50_13 +; CHECK-NEXT: mv a5, s1 +; CHECK-NEXT: bgtz s1, .LBB50_9 ; CHECK-NEXT: .LBB50_2: # 
%entry -; CHECK-NEXT: bgtz a2, .LBB50_14 -; CHECK-NEXT: .LBB50_3: # %entry ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bne a2, a5, .LBB50_15 +; CHECK-NEXT: bne a2, a4, .LBB50_10 +; CHECK-NEXT: .LBB50_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: bne s1, a4, .LBB50_11 ; CHECK-NEXT: .LBB50_4: # %entry -; CHECK-NEXT: bgtz s1, .LBB50_16 +; CHECK-NEXT: bnez a5, .LBB50_12 ; CHECK-NEXT: .LBB50_5: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: bne s1, a5, .LBB50_17 +; CHECK-NEXT: beqz a3, .LBB50_7 ; CHECK-NEXT: .LBB50_6: # %entry -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: blez a4, .LBB50_18 +; CHECK-NEXT: sgtz a2, a3 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: .LBB50_7: # %entry -; CHECK-NEXT: bnez a4, .LBB50_19 -; CHECK-NEXT: .LBB50_8: # %entry -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: blez a3, .LBB50_20 -; CHECK-NEXT: .LBB50_9: # %entry -; CHECK-NEXT: beqz a3, .LBB50_11 -; CHECK-NEXT: .LBB50_10: # %entry -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB50_11: # %entry ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 32 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB50_12: # %entry +; CHECK-NEXT: .LBB50_8: # %entry ; CHECK-NEXT: li a3, 1 -; CHECK-NEXT: mv a4, s1 +; CHECK-NEXT: mv a5, s1 ; CHECK-NEXT: blez s1, .LBB50_2 -; CHECK-NEXT: .LBB50_13: # %entry -; CHECK-NEXT: li a4, 1 -; CHECK-NEXT: blez a2, .LBB50_3 -; CHECK-NEXT: .LBB50_14: # %entry -; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: .LBB50_9: # %entry +; CHECK-NEXT: li a5, 1 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: beq a2, a5, .LBB50_4 -; CHECK-NEXT: .LBB50_15: # %entry -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: blez s1, .LBB50_5 -; CHECK-NEXT: .LBB50_16: # %entry -; CHECK-NEXT: li s0, 0 +; CHECK-NEXT: beq a2, a4, .LBB50_3 +; CHECK-NEXT: .LBB50_10: # %entry +; CHECK-NEXT: slti a1, a2, 1 +; CHECK-NEXT: and a1, a1, a0 ; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: beq s1, a5, .LBB50_6 -; CHECK-NEXT: .LBB50_17: # %entry -; CHECK-NEXT: mv a0, s0 -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bgtz a4, .LBB50_7 -; CHECK-NEXT: .LBB50_18: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: beqz a4, .LBB50_8 -; CHECK-NEXT: .LBB50_19: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: bgtz a3, .LBB50_9 -; CHECK-NEXT: .LBB50_20: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bnez a3, .LBB50_10 -; CHECK-NEXT: j .LBB50_11 +; CHECK-NEXT: beq s1, a4, .LBB50_4 +; CHECK-NEXT: .LBB50_11: # %entry +; CHECK-NEXT: slti a0, s1, 1 +; CHECK-NEXT: and a0, a0, s0 +; CHECK-NEXT: beqz a5, .LBB50_5 +; CHECK-NEXT: .LBB50_12: # %entry +; CHECK-NEXT: sgtz a2, a5 +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: bnez a3, .LBB50_6 +; CHECK-NEXT: j .LBB50_7 entry: %conv = fptosi <2 x float> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -4154,36 +3936,27 @@ ; CHECK-NEXT: mv a0, s2 ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: call __fixunssfti@plt +; CHECK-NEXT: li a4, 1 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: beqz a3, .LBB52_2 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: beq a3, a4, .LBB52_2 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a2, a1 +; CHECK-NEXT: seqz a0, a3 +; CHECK-NEXT: and a0, a0, a2 ; CHECK-NEXT: .LBB52_2: # %entry -; CHECK-NEXT: li a4, 1 -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: bne a3, a4, .LBB52_7 +; CHECK-NEXT: beq s1, a4, .LBB52_4 ; CHECK-NEXT: # %bb.3: # %entry -; CHECK-NEXT: bne s1, a1, .LBB52_8 +; 
CHECK-NEXT: seqz a1, s1 +; CHECK-NEXT: and a1, a1, s0 ; CHECK-NEXT: .LBB52_4: # %entry -; CHECK-NEXT: beq s1, a4, .LBB52_6 -; CHECK-NEXT: .LBB52_5: # %entry -; CHECK-NEXT: mv a1, s0 -; CHECK-NEXT: .LBB52_6: # %entry ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s2, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 32 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB52_7: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: beq s1, a1, .LBB52_4 -; CHECK-NEXT: .LBB52_8: # %entry -; CHECK-NEXT: mv s0, a1 -; CHECK-NEXT: bne s1, a4, .LBB52_5 -; CHECK-NEXT: j .LBB52_6 entry: %conv = fptoui <2 x half> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -4213,74 +3986,54 @@ ; CHECK-NEXT: call __extendhfsf2@plt ; CHECK-NEXT: call __fixsfti@plt ; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: li a5, 1 +; CHECK-NEXT: li a4, 1 ; CHECK-NEXT: mv a3, a1 -; CHECK-NEXT: bgtz a1, .LBB53_12 +; CHECK-NEXT: bgtz a1, .LBB53_8 ; CHECK-NEXT: # %bb.1: # %entry -; CHECK-NEXT: mv a4, s1 -; CHECK-NEXT: bgtz s1, .LBB53_13 +; CHECK-NEXT: mv a5, s1 +; CHECK-NEXT: bgtz s1, .LBB53_9 ; CHECK-NEXT: .LBB53_2: # %entry -; CHECK-NEXT: bgtz a2, .LBB53_14 -; CHECK-NEXT: .LBB53_3: # %entry ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: bne a2, a5, .LBB53_15 +; CHECK-NEXT: bne a2, a4, .LBB53_10 +; CHECK-NEXT: .LBB53_3: # %entry +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: bne s1, a4, .LBB53_11 ; CHECK-NEXT: .LBB53_4: # %entry -; CHECK-NEXT: bgtz s1, .LBB53_16 +; CHECK-NEXT: bnez a5, .LBB53_12 ; CHECK-NEXT: .LBB53_5: # %entry -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: bne s1, a5, .LBB53_17 +; CHECK-NEXT: beqz a3, .LBB53_7 ; CHECK-NEXT: .LBB53_6: # %entry -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: blez a4, .LBB53_18 +; CHECK-NEXT: sgtz a2, a3 +; CHECK-NEXT: and a1, a2, a1 ; CHECK-NEXT: .LBB53_7: # %entry -; CHECK-NEXT: bnez a4, .LBB53_19 -; CHECK-NEXT: .LBB53_8: # %entry -; CHECK-NEXT: mv a2, a1 -; CHECK-NEXT: blez a3, .LBB53_20 -; CHECK-NEXT: .LBB53_9: # %entry -; CHECK-NEXT: beqz a3, .LBB53_11 -; CHECK-NEXT: .LBB53_10: # %entry -; CHECK-NEXT: mv a1, a2 -; CHECK-NEXT: .LBB53_11: # %entry ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s1, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s2, 0(sp) # 8-byte Folded Reload ; CHECK-NEXT: addi sp, sp, 32 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB53_12: # %entry +; CHECK-NEXT: .LBB53_8: # %entry ; CHECK-NEXT: li a3, 1 -; CHECK-NEXT: mv a4, s1 +; CHECK-NEXT: mv a5, s1 ; CHECK-NEXT: blez s1, .LBB53_2 -; CHECK-NEXT: .LBB53_13: # %entry -; CHECK-NEXT: li a4, 1 -; CHECK-NEXT: blez a2, .LBB53_3 -; CHECK-NEXT: .LBB53_14: # %entry -; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: .LBB53_9: # %entry +; CHECK-NEXT: li a5, 1 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: beq a2, a5, .LBB53_4 -; CHECK-NEXT: .LBB53_15: # %entry -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: blez s1, .LBB53_5 -; CHECK-NEXT: .LBB53_16: # %entry -; CHECK-NEXT: li s0, 0 +; CHECK-NEXT: beq a2, a4, .LBB53_3 +; CHECK-NEXT: .LBB53_10: # %entry +; CHECK-NEXT: slti a1, a2, 1 +; CHECK-NEXT: and a1, a1, a0 ; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: beq s1, a5, .LBB53_6 -; CHECK-NEXT: .LBB53_17: # %entry -; CHECK-NEXT: mv a0, s0 -; CHECK-NEXT: mv a2, a0 -; CHECK-NEXT: bgtz a4, .LBB53_7 -; CHECK-NEXT: .LBB53_18: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: beqz a4, .LBB53_8 -; CHECK-NEXT: .LBB53_19: # %entry -; CHECK-NEXT: mv a0, a2 -; CHECK-NEXT: mv a2, a1 -; 
CHECK-NEXT: bgtz a3, .LBB53_9 -; CHECK-NEXT: .LBB53_20: # %entry -; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: bnez a3, .LBB53_10 -; CHECK-NEXT: j .LBB53_11 +; CHECK-NEXT: beq s1, a4, .LBB53_4 +; CHECK-NEXT: .LBB53_11: # %entry +; CHECK-NEXT: slti a0, s1, 1 +; CHECK-NEXT: and a0, a0, s0 +; CHECK-NEXT: beqz a5, .LBB53_5 +; CHECK-NEXT: .LBB53_12: # %entry +; CHECK-NEXT: sgtz a2, a5 +; CHECK-NEXT: and a0, a2, a0 +; CHECK-NEXT: bnez a3, .LBB53_6 +; CHECK-NEXT: j .LBB53_7 entry: %conv = fptosi <2 x half> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> ) diff --git a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll --- a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll +++ b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll @@ -18,7 +18,8 @@ ; ; RV64-LABEL: neg_sel_constants: ; RV64: # %bb.0: -; RV64-NEXT: srai a0, a0, 63 +; RV64-NEXT: slti a0, a0, 0 +; RV64-NEXT: negw a0, a0 ; RV64-NEXT: andi a0, a0, 5 ; RV64-NEXT: ret %tmp.1 = icmp slt i32 %a, 0 @@ -78,15 +79,20 @@ ; Compare if positive and select of constants where one constant is zero. define i32 @pos_sel_constants(i32 signext %a) { -; CHECK-LABEL: pos_sel_constants: -; CHECK: # %bb.0: -; CHECK-NEXT: mv a1, a0 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: bgez a1, .LBB4_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB4_2: -; CHECK-NEXT: ret +; RV32-LABEL: pos_sel_constants: +; RV32: # %bb.0: +; RV32-NEXT: not a0, a0 +; RV32-NEXT: srai a0, a0, 31 +; RV32-NEXT: andi a0, a0, 5 +; RV32-NEXT: ret +; +; RV64-LABEL: pos_sel_constants: +; RV64: # %bb.0: +; RV64-NEXT: li a1, -1 +; RV64-NEXT: slt a0, a1, a0 +; RV64-NEXT: negw a0, a0 +; RV64-NEXT: andi a0, a0, 5 +; RV64-NEXT: ret %tmp.1 = icmp sgt i32 %a, -1 %retval = select i1 %tmp.1, i32 5, i32 0 ret i32 %retval @@ -119,20 +125,16 @@ define i32 @pos_sel_variable_and_zero(i32 signext %a, i32 signext %b) { ; RV32I-LABEL: pos_sel_variable_and_zero: ; RV32I: # %bb.0: -; RV32I-NEXT: bgez a0, .LBB6_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: li a1, 0 -; RV32I-NEXT: .LBB6_2: -; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: not a0, a0 +; RV32I-NEXT: srai a0, a0, 31 +; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: pos_sel_variable_and_zero: ; RV64I: # %bb.0: -; RV64I-NEXT: bgez a0, .LBB6_2 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: li a1, 0 -; RV64I-NEXT: .LBB6_2: -; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: not a0, a0 +; RV64I-NEXT: srai a0, a0, 31 +; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: pos_sel_variable_and_zero: @@ -156,18 +158,16 @@ define i32 @not_neg_sel_same_variable(i32 signext %a) { ; RV32I-LABEL: not_neg_sel_same_variable: ; RV32I: # %bb.0: -; RV32I-NEXT: bgtz a0, .LBB7_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: .LBB7_2: +; RV32I-NEXT: sgtz a1, a0 +; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: not_neg_sel_same_variable: ; RV64I: # %bb.0: -; RV64I-NEXT: bgtz a0, .LBB7_2 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: .LBB7_2: +; RV64I-NEXT: sgtz a1, a0 +; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: not_neg_sel_same_variable: @@ -189,19 +189,17 @@ ; RV32I-LABEL: sub_clamp_zero: ; RV32I: # %bb.0: ; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgtz a0, .LBB8_2 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: li a0, 0 -; RV32I-NEXT: .LBB8_2: +; RV32I-NEXT: sgtz a1, a0 +; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: 
sub_clamp_zero: ; RV64I: # %bb.0: ; RV64I-NEXT: subw a0, a0, a1 -; RV64I-NEXT: bgtz a0, .LBB8_2 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: li a0, 0 -; RV64I-NEXT: .LBB8_2: +; RV64I-NEXT: sgtz a1, a0 +; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: sub_clamp_zero: @@ -224,12 +222,9 @@ define i8 @sel_shift_bool_i8(i1 %t) { ; CHECK-LABEL: sel_shift_bool_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: andi a1, a0, 1 -; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: bnez a1, .LBB9_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: .LBB9_2: +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: neg a0, a0 +; CHECK-NEXT: andi a0, a0, -128 ; CHECK-NEXT: ret %shl = select i1 %t, i8 128, i8 0 ret i8 %shl diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll --- a/llvm/test/CodeGen/RISCV/shifts.ll +++ b/llvm/test/CodeGen/RISCV/shifts.ll @@ -146,28 +146,28 @@ ; RV32I-LABEL: lshr128: ; RV32I: # %bb.0: ; RV32I-NEXT: lw a2, 0(a2) -; RV32I-NEXT: lw a5, 8(a1) -; RV32I-NEXT: lw a4, 12(a1) -; RV32I-NEXT: neg a6, a2 -; RV32I-NEXT: li a3, 64 +; RV32I-NEXT: lw a4, 8(a1) +; RV32I-NEXT: lw a3, 12(a1) +; RV32I-NEXT: neg a5, a2 +; RV32I-NEXT: li a6, 64 ; RV32I-NEXT: li a7, 32 ; RV32I-NEXT: sub t1, a7, a2 -; RV32I-NEXT: sll t0, a5, a6 +; RV32I-NEXT: sll t0, a4, a5 ; RV32I-NEXT: bltz t1, .LBB6_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv t2, t0 ; RV32I-NEXT: j .LBB6_3 ; RV32I-NEXT: .LBB6_2: -; RV32I-NEXT: sll a6, a4, a6 -; RV32I-NEXT: sub a7, a3, a2 +; RV32I-NEXT: sll a5, a3, a5 +; RV32I-NEXT: sub a7, a6, a2 ; RV32I-NEXT: xori a7, a7, 31 -; RV32I-NEXT: srli t2, a5, 1 +; RV32I-NEXT: srli t2, a4, 1 ; RV32I-NEXT: srl a7, t2, a7 -; RV32I-NEXT: or t2, a6, a7 +; RV32I-NEXT: or t2, a5, a7 ; RV32I-NEXT: .LBB6_3: ; RV32I-NEXT: lw t5, 4(a1) -; RV32I-NEXT: addi a6, a2, -32 -; RV32I-NEXT: bgez a6, .LBB6_5 +; RV32I-NEXT: addi a5, a2, -32 +; RV32I-NEXT: bgez a5, .LBB6_5 ; RV32I-NEXT: # %bb.4: ; RV32I-NEXT: srl a7, t5, a2 ; RV32I-NEXT: or t2, t2, a7 @@ -177,11 +177,11 @@ ; RV32I-NEXT: bltz t4, .LBB6_7 ; RV32I-NEXT: # %bb.6: ; RV32I-NEXT: li a7, 0 -; RV32I-NEXT: bgeu a2, a3, .LBB6_8 +; RV32I-NEXT: bgeu a2, a6, .LBB6_8 ; RV32I-NEXT: j .LBB6_9 ; RV32I-NEXT: .LBB6_7: -; RV32I-NEXT: srl a7, a4, t3 -; RV32I-NEXT: bltu a2, a3, .LBB6_9 +; RV32I-NEXT: srl a7, a3, t3 +; RV32I-NEXT: bltu a2, a6, .LBB6_9 ; RV32I-NEXT: .LBB6_8: ; RV32I-NEXT: mv t2, a7 ; RV32I-NEXT: .LBB6_9: @@ -192,9 +192,9 @@ ; RV32I-NEXT: .LBB6_11: ; RV32I-NEXT: lw a1, 0(a1) ; RV32I-NEXT: xori t2, a2, 31 -; RV32I-NEXT: bltz a6, .LBB6_13 +; RV32I-NEXT: bltz a5, .LBB6_13 ; RV32I-NEXT: # %bb.12: -; RV32I-NEXT: srl t5, t5, a6 +; RV32I-NEXT: srl t5, t5, a5 ; RV32I-NEXT: bltz t1, .LBB6_14 ; RV32I-NEXT: j .LBB6_15 ; RV32I-NEXT: .LBB6_13: @@ -206,52 +206,47 @@ ; RV32I-NEXT: .LBB6_14: ; RV32I-NEXT: or t5, t5, t0 ; RV32I-NEXT: .LBB6_15: -; RV32I-NEXT: slli t0, a4, 1 +; RV32I-NEXT: slli t0, a3, 1 ; RV32I-NEXT: bltz t4, .LBB6_17 ; RV32I-NEXT: # %bb.16: -; RV32I-NEXT: srl t1, a4, t4 -; RV32I-NEXT: bgeu a2, a3, .LBB6_18 +; RV32I-NEXT: srl t1, a3, t4 +; RV32I-NEXT: bgeu a2, a6, .LBB6_18 ; RV32I-NEXT: j .LBB6_19 ; RV32I-NEXT: .LBB6_17: -; RV32I-NEXT: srl t1, a5, t3 +; RV32I-NEXT: srl t1, a4, t3 ; RV32I-NEXT: xori t3, t3, 31 ; RV32I-NEXT: sll t3, t0, t3 ; RV32I-NEXT: or t1, t1, t3 -; RV32I-NEXT: bltu a2, a3, .LBB6_19 +; RV32I-NEXT: bltu a2, a6, .LBB6_19 ; RV32I-NEXT: .LBB6_18: ; RV32I-NEXT: mv t5, t1 ; RV32I-NEXT: .LBB6_19: ; RV32I-NEXT: bnez a2, .LBB6_22 ; RV32I-NEXT: # %bb.20: -; RV32I-NEXT: bltz a6, .LBB6_23 +; RV32I-NEXT: bltz a5, 
.LBB6_23 ; RV32I-NEXT: .LBB6_21: -; RV32I-NEXT: srl a5, a4, a6 -; RV32I-NEXT: bgeu a2, a3, .LBB6_24 -; RV32I-NEXT: j .LBB6_25 +; RV32I-NEXT: srl a4, a3, a5 +; RV32I-NEXT: j .LBB6_24 ; RV32I-NEXT: .LBB6_22: ; RV32I-NEXT: mv a1, t5 -; RV32I-NEXT: bgez a6, .LBB6_21 +; RV32I-NEXT: bgez a5, .LBB6_21 ; RV32I-NEXT: .LBB6_23: -; RV32I-NEXT: srl a5, a5, a2 -; RV32I-NEXT: sll t0, t0, t2 -; RV32I-NEXT: or a5, a5, t0 -; RV32I-NEXT: bltu a2, a3, .LBB6_25 +; RV32I-NEXT: srl a4, a4, a2 +; RV32I-NEXT: sll a6, t0, t2 +; RV32I-NEXT: or a4, a4, a6 ; RV32I-NEXT: .LBB6_24: -; RV32I-NEXT: li a5, 0 -; RV32I-NEXT: .LBB6_25: -; RV32I-NEXT: bltz a6, .LBB6_27 -; RV32I-NEXT: # %bb.26: -; RV32I-NEXT: li a4, 0 -; RV32I-NEXT: bgeu a2, a3, .LBB6_28 -; RV32I-NEXT: j .LBB6_29 +; RV32I-NEXT: sltiu a6, a2, 64 +; RV32I-NEXT: and a4, a6, a4 +; RV32I-NEXT: bltz a5, .LBB6_26 +; RV32I-NEXT: # %bb.25: +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: j .LBB6_27 +; RV32I-NEXT: .LBB6_26: +; RV32I-NEXT: srl a2, a3, a2 ; RV32I-NEXT: .LBB6_27: -; RV32I-NEXT: srl a4, a4, a2 -; RV32I-NEXT: bltu a2, a3, .LBB6_29 -; RV32I-NEXT: .LBB6_28: -; RV32I-NEXT: li a4, 0 -; RV32I-NEXT: .LBB6_29: -; RV32I-NEXT: sw a4, 12(a0) -; RV32I-NEXT: sw a5, 8(a0) +; RV32I-NEXT: and a2, a6, a2 +; RV32I-NEXT: sw a2, 12(a0) +; RV32I-NEXT: sw a4, 8(a0) ; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: sw a7, 4(a0) ; RV32I-NEXT: ret @@ -419,28 +414,28 @@ ; RV32I-LABEL: shl128: ; RV32I: # %bb.0: ; RV32I-NEXT: lw a2, 0(a2) -; RV32I-NEXT: lw a5, 4(a1) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: neg a6, a2 -; RV32I-NEXT: li a3, 64 +; RV32I-NEXT: lw a4, 4(a1) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: neg a5, a2 +; RV32I-NEXT: li a6, 64 ; RV32I-NEXT: li a7, 32 ; RV32I-NEXT: sub t1, a7, a2 -; RV32I-NEXT: srl t0, a5, a6 +; RV32I-NEXT: srl t0, a4, a5 ; RV32I-NEXT: bltz t1, .LBB8_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: mv t2, t0 ; RV32I-NEXT: j .LBB8_3 ; RV32I-NEXT: .LBB8_2: -; RV32I-NEXT: srl a6, a4, a6 -; RV32I-NEXT: sub a7, a3, a2 +; RV32I-NEXT: srl a5, a3, a5 +; RV32I-NEXT: sub a7, a6, a2 ; RV32I-NEXT: xori a7, a7, 31 -; RV32I-NEXT: slli t2, a5, 1 +; RV32I-NEXT: slli t2, a4, 1 ; RV32I-NEXT: sll a7, t2, a7 -; RV32I-NEXT: or t2, a6, a7 +; RV32I-NEXT: or t2, a5, a7 ; RV32I-NEXT: .LBB8_3: ; RV32I-NEXT: lw t5, 8(a1) -; RV32I-NEXT: addi a6, a2, -32 -; RV32I-NEXT: bgez a6, .LBB8_5 +; RV32I-NEXT: addi a5, a2, -32 +; RV32I-NEXT: bgez a5, .LBB8_5 ; RV32I-NEXT: # %bb.4: ; RV32I-NEXT: sll a7, t5, a2 ; RV32I-NEXT: or t2, t2, a7 @@ -450,11 +445,11 @@ ; RV32I-NEXT: bltz t4, .LBB8_7 ; RV32I-NEXT: # %bb.6: ; RV32I-NEXT: li a7, 0 -; RV32I-NEXT: bgeu a2, a3, .LBB8_8 +; RV32I-NEXT: bgeu a2, a6, .LBB8_8 ; RV32I-NEXT: j .LBB8_9 ; RV32I-NEXT: .LBB8_7: -; RV32I-NEXT: sll a7, a4, t3 -; RV32I-NEXT: bltu a2, a3, .LBB8_9 +; RV32I-NEXT: sll a7, a3, t3 +; RV32I-NEXT: bltu a2, a6, .LBB8_9 ; RV32I-NEXT: .LBB8_8: ; RV32I-NEXT: mv t2, a7 ; RV32I-NEXT: .LBB8_9: @@ -465,9 +460,9 @@ ; RV32I-NEXT: .LBB8_11: ; RV32I-NEXT: lw a1, 12(a1) ; RV32I-NEXT: xori t2, a2, 31 -; RV32I-NEXT: bltz a6, .LBB8_13 +; RV32I-NEXT: bltz a5, .LBB8_13 ; RV32I-NEXT: # %bb.12: -; RV32I-NEXT: sll t5, t5, a6 +; RV32I-NEXT: sll t5, t5, a5 ; RV32I-NEXT: bltz t1, .LBB8_14 ; RV32I-NEXT: j .LBB8_15 ; RV32I-NEXT: .LBB8_13: @@ -479,52 +474,47 @@ ; RV32I-NEXT: .LBB8_14: ; RV32I-NEXT: or t5, t5, t0 ; RV32I-NEXT: .LBB8_15: -; RV32I-NEXT: srli t0, a4, 1 +; RV32I-NEXT: srli t0, a3, 1 ; RV32I-NEXT: bltz t4, .LBB8_17 ; RV32I-NEXT: # %bb.16: -; RV32I-NEXT: sll t1, a4, t4 -; RV32I-NEXT: bgeu a2, a3, .LBB8_18 +; RV32I-NEXT: sll t1, a3, t4 +; RV32I-NEXT: bgeu a2, a6, 
.LBB8_18 ; RV32I-NEXT: j .LBB8_19 ; RV32I-NEXT: .LBB8_17: -; RV32I-NEXT: sll t1, a5, t3 +; RV32I-NEXT: sll t1, a4, t3 ; RV32I-NEXT: xori t3, t3, 31 ; RV32I-NEXT: srl t3, t0, t3 ; RV32I-NEXT: or t1, t1, t3 -; RV32I-NEXT: bltu a2, a3, .LBB8_19 +; RV32I-NEXT: bltu a2, a6, .LBB8_19 ; RV32I-NEXT: .LBB8_18: ; RV32I-NEXT: mv t5, t1 ; RV32I-NEXT: .LBB8_19: ; RV32I-NEXT: bnez a2, .LBB8_22 ; RV32I-NEXT: # %bb.20: -; RV32I-NEXT: bltz a6, .LBB8_23 +; RV32I-NEXT: bltz a5, .LBB8_23 ; RV32I-NEXT: .LBB8_21: -; RV32I-NEXT: sll a5, a4, a6 -; RV32I-NEXT: bgeu a2, a3, .LBB8_24 -; RV32I-NEXT: j .LBB8_25 +; RV32I-NEXT: sll a4, a3, a5 +; RV32I-NEXT: j .LBB8_24 ; RV32I-NEXT: .LBB8_22: ; RV32I-NEXT: mv a1, t5 -; RV32I-NEXT: bgez a6, .LBB8_21 +; RV32I-NEXT: bgez a5, .LBB8_21 ; RV32I-NEXT: .LBB8_23: -; RV32I-NEXT: sll a5, a5, a2 -; RV32I-NEXT: srl t0, t0, t2 -; RV32I-NEXT: or a5, a5, t0 -; RV32I-NEXT: bltu a2, a3, .LBB8_25 +; RV32I-NEXT: sll a4, a4, a2 +; RV32I-NEXT: srl a6, t0, t2 +; RV32I-NEXT: or a4, a4, a6 ; RV32I-NEXT: .LBB8_24: -; RV32I-NEXT: li a5, 0 -; RV32I-NEXT: .LBB8_25: -; RV32I-NEXT: bltz a6, .LBB8_27 -; RV32I-NEXT: # %bb.26: -; RV32I-NEXT: li a4, 0 -; RV32I-NEXT: bgeu a2, a3, .LBB8_28 -; RV32I-NEXT: j .LBB8_29 +; RV32I-NEXT: sltiu a6, a2, 64 +; RV32I-NEXT: and a4, a6, a4 +; RV32I-NEXT: bltz a5, .LBB8_26 +; RV32I-NEXT: # %bb.25: +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: j .LBB8_27 +; RV32I-NEXT: .LBB8_26: +; RV32I-NEXT: sll a2, a3, a2 ; RV32I-NEXT: .LBB8_27: -; RV32I-NEXT: sll a4, a4, a2 -; RV32I-NEXT: bltu a2, a3, .LBB8_29 -; RV32I-NEXT: .LBB8_28: -; RV32I-NEXT: li a4, 0 -; RV32I-NEXT: .LBB8_29: -; RV32I-NEXT: sw a4, 0(a0) -; RV32I-NEXT: sw a5, 4(a0) +; RV32I-NEXT: and a2, a6, a2 +; RV32I-NEXT: sw a2, 0(a0) +; RV32I-NEXT: sw a4, 4(a0) ; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: sw a7, 8(a0) ; RV32I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll --- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll @@ -15,36 +15,24 @@ ; RV32-NEXT: lui a2, 16 ; RV32-NEXT: addi a2, a2, -256 ; RV32-NEXT: and a2, a0, a2 -; RV32-NEXT: slli a3, a2, 16 -; RV32-NEXT: srai a3, a3, 24 -; RV32-NEXT: slli a4, a0, 24 -; RV32-NEXT: srai a6, a4, 24 -; RV32-NEXT: slli a4, a0, 8 -; RV32-NEXT: mv a5, a0 -; RV32-NEXT: bgtz a6, .LBB0_2 -; RV32-NEXT: # %bb.1: -; RV32-NEXT: li a5, 0 -; RV32-NEXT: .LBB0_2: -; RV32-NEXT: srai a4, a4, 24 -; RV32-NEXT: andi a5, a5, 255 -; RV32-NEXT: bgtz a3, .LBB0_4 -; RV32-NEXT: # %bb.3: -; RV32-NEXT: li a2, 0 -; RV32-NEXT: j .LBB0_5 -; RV32-NEXT: .LBB0_4: -; RV32-NEXT: srli a2, a2, 8 -; RV32-NEXT: .LBB0_5: +; RV32-NEXT: srli a3, a2, 8 +; RV32-NEXT: slli a2, a2, 16 +; RV32-NEXT: srai a2, a2, 24 +; RV32-NEXT: srli a4, a0, 16 +; RV32-NEXT: slli a5, a0, 24 +; RV32-NEXT: srai a5, a5, 24 +; RV32-NEXT: slli a6, a0, 8 +; RV32-NEXT: srai a6, a6, 24 +; RV32-NEXT: sgtz a6, a6 +; RV32-NEXT: sgtz a5, a5 +; RV32-NEXT: sgtz a2, a2 +; RV32-NEXT: and a2, a2, a3 ; RV32-NEXT: slli a2, a2, 8 -; RV32-NEXT: or a2, a5, a2 -; RV32-NEXT: bgtz a4, .LBB0_7 -; RV32-NEXT: # %bb.6: -; RV32-NEXT: li a0, 0 -; RV32-NEXT: j .LBB0_8 -; RV32-NEXT: .LBB0_7: -; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: .LBB0_8: -; RV32-NEXT: sb a0, 2(a1) -; RV32-NEXT: sh a2, 0(a1) +; RV32-NEXT: and a0, a5, a0 +; RV32-NEXT: or a0, a0, a2 +; RV32-NEXT: and a2, a6, a4 +; RV32-NEXT: sb a2, 2(a1) +; RV32-NEXT: sh a0, 0(a1) ; RV32-NEXT: ret ; ; RV64-LABEL: vec3_setcc_crash: @@ -53,36 +41,24 @@ ; RV64-NEXT: lui a2, 16 ; RV64-NEXT: addiw a2, a2, -256 ; RV64-NEXT: and 
a2, a0, a2 -; RV64-NEXT: slli a3, a2, 48 -; RV64-NEXT: srai a3, a3, 56 -; RV64-NEXT: slli a4, a0, 56 -; RV64-NEXT: srai a6, a4, 56 -; RV64-NEXT: slli a4, a0, 40 -; RV64-NEXT: mv a5, a0 -; RV64-NEXT: bgtz a6, .LBB0_2 -; RV64-NEXT: # %bb.1: -; RV64-NEXT: li a5, 0 -; RV64-NEXT: .LBB0_2: -; RV64-NEXT: srai a4, a4, 56 -; RV64-NEXT: andi a5, a5, 255 -; RV64-NEXT: bgtz a3, .LBB0_4 -; RV64-NEXT: # %bb.3: -; RV64-NEXT: li a2, 0 -; RV64-NEXT: j .LBB0_5 -; RV64-NEXT: .LBB0_4: -; RV64-NEXT: srli a2, a2, 8 -; RV64-NEXT: .LBB0_5: +; RV64-NEXT: srli a3, a2, 8 +; RV64-NEXT: slli a2, a2, 48 +; RV64-NEXT: srai a2, a2, 56 +; RV64-NEXT: srli a4, a0, 16 +; RV64-NEXT: slli a5, a0, 56 +; RV64-NEXT: srai a5, a5, 56 +; RV64-NEXT: slli a6, a0, 40 +; RV64-NEXT: srai a6, a6, 56 +; RV64-NEXT: sgtz a6, a6 +; RV64-NEXT: sgtz a5, a5 +; RV64-NEXT: sgtz a2, a2 +; RV64-NEXT: and a2, a2, a3 ; RV64-NEXT: slli a2, a2, 8 -; RV64-NEXT: or a2, a5, a2 -; RV64-NEXT: bgtz a4, .LBB0_7 -; RV64-NEXT: # %bb.6: -; RV64-NEXT: li a0, 0 -; RV64-NEXT: j .LBB0_8 -; RV64-NEXT: .LBB0_7: -; RV64-NEXT: srli a0, a0, 16 -; RV64-NEXT: .LBB0_8: -; RV64-NEXT: sb a0, 2(a1) -; RV64-NEXT: sh a2, 0(a1) +; RV64-NEXT: and a0, a5, a0 +; RV64-NEXT: or a0, a0, a2 +; RV64-NEXT: and a2, a6, a4 +; RV64-NEXT: sb a2, 2(a1) +; RV64-NEXT: sh a0, 0(a1) ; RV64-NEXT: ret %a = load <3 x i8>, <3 x i8>* %in %cmp = icmp sgt <3 x i8> %a, zeroinitializer