Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp =================================================================== --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -3814,16 +3814,25 @@ SDValue C) const { // Check integral scalar types. if (VT.isScalarInteger()) { - // Do not perform the transformation on riscv32 with the M extension. - if (!Subtarget.is64Bit() && Subtarget.hasStdExtM()) - return false; if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) { - if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t)) + const APInt &APImm = ConstNode->getAPIntValue(); + // Do not perform the transformation on riscv32 with the M extension or + // if the data type exceeds 64-bit. + if ((Subtarget.hasStdExtM() || VT.getSizeInBits() > 64) && + !Subtarget.is64Bit()) return false; - int64_t Imm = ConstNode->getSExtValue(); - if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) || - isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm)) + // Optimize to ±(1<<N) ± 1 multiplications. + if ((APImm + 1).isPowerOf2() || (APImm - 1).isPowerOf2() || + (1 - APImm).isPowerOf2() || (-1 - APImm).isPowerOf2()) + return true; + // Optimize to ±(1<<M) * ((1<<N) ± 1) multiplications, in which the + // immediate needs a pair of LUI/ADDI to be materialized. + int64_t Imm = ConstNode->getSExtValue(); + if ((Imm < -2048 || 2047 < Imm) && (Imm & 0x7ff) != 0) { + int64_t Tmp = Imm >> APImm.countTrailingZeros(); + if (isPowerOf2_64(Tmp + 1) || isPowerOf2_64(Tmp - 1) || + isPowerOf2_64(1 - Tmp)) + return true; + } } } Index: llvm/test/CodeGen/RISCV/mul.ll =================================================================== --- llvm/test/CodeGen/RISCV/mul.ll +++ llvm/test/CodeGen/RISCV/mul.ll @@ -638,13 +638,9 @@ define i32 @muli32_p4352(i32 %a) nounwind { ; RV32I-LABEL: muli32_p4352: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a1, 1 -; RV32I-NEXT: addi a1, a1, 256 -; RV32I-NEXT: call __mulsi3@plt -; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: slli a1, a0, 8 +; RV32I-NEXT: slli a0, a0, 12 +; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muli32_p4352: @@ -656,20 +652,16 @@ ; ; RV64I-LABEL: muli32_p4352: ; RV64I: # 
%bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 1 -; RV64I-NEXT: addiw a1, a1, 256 -; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: slli a1, a0, 8 +; RV64I-NEXT: slli a0, a0, 12 +; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p4352: ; RV64IM: # %bb.0: -; RV64IM-NEXT: lui a1, 1 -; RV64IM-NEXT: addiw a1, a1, 256 -; RV64IM-NEXT: mul a0, a0, a1 +; RV64IM-NEXT: slli a1, a0, 8 +; RV64IM-NEXT: slli a0, a0, 12 +; RV64IM-NEXT: addw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, 4352 ret i32 %1 @@ -678,13 +670,9 @@ define i32 @muli32_p3840(i32 %a) nounwind { ; RV32I-LABEL: muli32_p3840: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a1, 1 -; RV32I-NEXT: addi a1, a1, -256 -; RV32I-NEXT: call __mulsi3@plt -; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: slli a1, a0, 8 +; RV32I-NEXT: slli a0, a0, 12 +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muli32_p3840: @@ -696,20 +684,16 @@ ; ; RV64I-LABEL: muli32_p3840: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 1 -; RV64I-NEXT: addiw a1, a1, -256 -; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: slli a1, a0, 8 +; RV64I-NEXT: slli a0, a0, 12 +; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p3840: ; RV64IM: # %bb.0: -; RV64IM-NEXT: lui a1, 1 -; RV64IM-NEXT: addiw a1, a1, -256 -; RV64IM-NEXT: mul a0, a0, a1 +; RV64IM-NEXT: slli a1, a0, 8 +; RV64IM-NEXT: slli a0, a0, 12 +; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, 3840 ret i32 %1 @@ -718,13 +702,9 @@ define i32 @muli32_m3840(i32 %a) nounwind { ; RV32I-LABEL: muli32_m3840: ; 
RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a1, 1048575 -; RV32I-NEXT: addi a1, a1, 256 -; RV32I-NEXT: call __mulsi3@plt -; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: slli a1, a0, 12 +; RV32I-NEXT: slli a0, a0, 8 +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muli32_m3840: @@ -736,20 +716,16 @@ ; ; RV64I-LABEL: muli32_m3840: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 1048575 -; RV64I-NEXT: addiw a1, a1, 256 -; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: slli a1, a0, 12 +; RV64I-NEXT: slli a0, a0, 8 +; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_m3840: ; RV64IM: # %bb.0: -; RV64IM-NEXT: lui a1, 1048575 -; RV64IM-NEXT: addiw a1, a1, 256 -; RV64IM-NEXT: mul a0, a0, a1 +; RV64IM-NEXT: slli a1, a0, 12 +; RV64IM-NEXT: slli a0, a0, 8 +; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, -3840 ret i32 %1 @@ -798,14 +774,18 @@ define i64 @muli64_p4352(i64 %a) nounwind { ; RV32I-LABEL: muli64_p4352: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a2, 1 -; RV32I-NEXT: addi a2, a2, 256 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: call __muldi3@plt -; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: srli a2, a0, 24 +; RV32I-NEXT: slli a3, a1, 8 +; RV32I-NEXT: or a2, a3, a2 +; RV32I-NEXT: srli a3, a0, 20 +; RV32I-NEXT: slli a1, a1, 12 +; RV32I-NEXT: or a1, a1, a3 +; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: slli a2, a0, 8 +; RV32I-NEXT: slli a3, a0, 12 +; RV32I-NEXT: add a0, a3, a2 +; RV32I-NEXT: sltu a2, a0, a3 +; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muli64_p4352: @@ -820,20 +800,16 @@ ; ; 
RV64I-LABEL: muli64_p4352: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 1 -; RV64I-NEXT: addiw a1, a1, 256 -; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: slli a1, a0, 8 +; RV64I-NEXT: slli a0, a0, 12 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli64_p4352: ; RV64IM: # %bb.0: -; RV64IM-NEXT: lui a1, 1 -; RV64IM-NEXT: addiw a1, a1, 256 -; RV64IM-NEXT: mul a0, a0, a1 +; RV64IM-NEXT: slli a1, a0, 8 +; RV64IM-NEXT: slli a0, a0, 12 +; RV64IM-NEXT: add a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i64 %a, 4352 ret i64 %1 @@ -842,14 +818,18 @@ define i64 @muli64_p3840(i64 %a) nounwind { ; RV32I-LABEL: muli64_p3840: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a2, 1 -; RV32I-NEXT: addi a2, a2, -256 -; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: call __muldi3@plt -; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: srli a2, a0, 24 +; RV32I-NEXT: slli a3, a1, 8 +; RV32I-NEXT: or a2, a3, a2 +; RV32I-NEXT: srli a3, a0, 20 +; RV32I-NEXT: slli a1, a1, 12 +; RV32I-NEXT: or a1, a1, a3 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: slli a2, a0, 8 +; RV32I-NEXT: slli a0, a0, 12 +; RV32I-NEXT: sltu a3, a0, a2 +; RV32I-NEXT: sub a1, a1, a3 +; RV32I-NEXT: sub a0, a0, a2 ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muli64_p3840: @@ -864,20 +844,16 @@ ; ; RV64I-LABEL: muli64_p3840: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 1 -; RV64I-NEXT: addiw a1, a1, -256 -; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: slli a1, a0, 8 +; RV64I-NEXT: slli a0, a0, 12 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli64_p3840: ; RV64IM: # %bb.0: -; 
RV64IM-NEXT: lui a1, 1 -; RV64IM-NEXT: addiw a1, a1, -256 -; RV64IM-NEXT: mul a0, a0, a1 +; RV64IM-NEXT: slli a1, a0, 8 +; RV64IM-NEXT: slli a0, a0, 12 +; RV64IM-NEXT: sub a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i64 %a, 3840 ret i64 %1 @@ -931,14 +907,18 @@ define i64 @muli64_m3840(i64 %a) nounwind { ; RV32I-LABEL: muli64_m3840: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a2, 1048575 -; RV32I-NEXT: addi a2, a2, 256 -; RV32I-NEXT: addi a3, zero, -1 -; RV32I-NEXT: call __muldi3@plt -; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: srli a2, a0, 20 +; RV32I-NEXT: slli a3, a1, 12 +; RV32I-NEXT: or a2, a3, a2 +; RV32I-NEXT: srli a3, a0, 24 +; RV32I-NEXT: slli a1, a1, 8 +; RV32I-NEXT: or a1, a1, a3 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: slli a2, a0, 12 +; RV32I-NEXT: slli a0, a0, 8 +; RV32I-NEXT: sltu a3, a0, a2 +; RV32I-NEXT: sub a1, a1, a3 +; RV32I-NEXT: sub a0, a0, a2 ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muli64_m3840: @@ -954,21 +934,231 @@ ; ; RV64I-LABEL: muli64_m3840: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 1048575 -; RV64I-NEXT: addiw a1, a1, 256 -; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: slli a1, a0, 12 +; RV64I-NEXT: slli a0, a0, 8 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli64_m3840: ; RV64IM: # %bb.0: -; RV64IM-NEXT: lui a1, 1048575 -; RV64IM-NEXT: addiw a1, a1, 256 -; RV64IM-NEXT: mul a0, a0, a1 +; RV64IM-NEXT: slli a1, a0, 12 +; RV64IM-NEXT: slli a0, a0, 8 +; RV64IM-NEXT: sub a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i64 %a, -3840 ret i64 %1 } + +define i128 @muli128_m3840(i128 %a) nounwind { +; RV32I-LABEL: muli128_m3840: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -64 +; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 
56(sp) # 4-byte Folded Spill +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a2, 4(a1) +; RV32I-NEXT: lw a4, 8(a1) +; RV32I-NEXT: lw a1, 12(a1) +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: addi a0, zero, -1 +; RV32I-NEXT: sw a0, 20(sp) +; RV32I-NEXT: sw a0, 16(sp) +; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: lui a0, 1048575 +; RV32I-NEXT: addi a0, a0, 256 +; RV32I-NEXT: sw a0, 8(sp) +; RV32I-NEXT: sw a1, 36(sp) +; RV32I-NEXT: sw a4, 32(sp) +; RV32I-NEXT: sw a2, 28(sp) +; RV32I-NEXT: addi a0, sp, 40 +; RV32I-NEXT: addi a1, sp, 24 +; RV32I-NEXT: addi a2, sp, 8 +; RV32I-NEXT: sw a3, 24(sp) +; RV32I-NEXT: call __multi3@plt +; RV32I-NEXT: lw a0, 52(sp) +; RV32I-NEXT: lw a1, 48(sp) +; RV32I-NEXT: lw a2, 44(sp) +; RV32I-NEXT: lw a3, 40(sp) +; RV32I-NEXT: sw a0, 12(s0) +; RV32I-NEXT: sw a1, 8(s0) +; RV32I-NEXT: sw a2, 4(s0) +; RV32I-NEXT: sw a3, 0(s0) +; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: ret +; +; RV32IM-LABEL: muli128_m3840: +; RV32IM: # %bb.0: +; RV32IM-NEXT: addi sp, sp, -64 +; RV32IM-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32IM-NEXT: sw s0, 56(sp) # 4-byte Folded Spill +; RV32IM-NEXT: lw a3, 0(a1) +; RV32IM-NEXT: lw a2, 4(a1) +; RV32IM-NEXT: lw a4, 8(a1) +; RV32IM-NEXT: lw a1, 12(a1) +; RV32IM-NEXT: mv s0, a0 +; RV32IM-NEXT: addi a0, zero, -1 +; RV32IM-NEXT: sw a0, 20(sp) +; RV32IM-NEXT: sw a0, 16(sp) +; RV32IM-NEXT: sw a0, 12(sp) +; RV32IM-NEXT: lui a0, 1048575 +; RV32IM-NEXT: addi a0, a0, 256 +; RV32IM-NEXT: sw a0, 8(sp) +; RV32IM-NEXT: sw a1, 36(sp) +; RV32IM-NEXT: sw a4, 32(sp) +; RV32IM-NEXT: sw a2, 28(sp) +; RV32IM-NEXT: addi a0, sp, 40 +; RV32IM-NEXT: addi a1, sp, 24 +; RV32IM-NEXT: addi a2, sp, 8 +; RV32IM-NEXT: sw a3, 24(sp) +; RV32IM-NEXT: call __multi3@plt +; RV32IM-NEXT: lw a0, 52(sp) +; RV32IM-NEXT: lw a1, 48(sp) +; RV32IM-NEXT: lw a2, 44(sp) +; RV32IM-NEXT: lw a3, 40(sp) +; RV32IM-NEXT: sw a0, 12(s0) +; RV32IM-NEXT: sw a1, 8(s0) +; RV32IM-NEXT: 
sw a2, 4(s0) +; RV32IM-NEXT: sw a3, 0(s0) +; RV32IM-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32IM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32IM-NEXT: addi sp, sp, 64 +; RV32IM-NEXT: ret +; +; RV64I-LABEL: muli128_m3840: +; RV64I: # %bb.0: +; RV64I-NEXT: srli a2, a0, 52 +; RV64I-NEXT: slli a3, a1, 12 +; RV64I-NEXT: or a2, a3, a2 +; RV64I-NEXT: srli a3, a0, 56 +; RV64I-NEXT: slli a1, a1, 8 +; RV64I-NEXT: or a1, a1, a3 +; RV64I-NEXT: sub a1, a1, a2 +; RV64I-NEXT: slli a2, a0, 12 +; RV64I-NEXT: slli a0, a0, 8 +; RV64I-NEXT: sltu a3, a0, a2 +; RV64I-NEXT: sub a1, a1, a3 +; RV64I-NEXT: sub a0, a0, a2 +; RV64I-NEXT: ret +; +; RV64IM-LABEL: muli128_m3840: +; RV64IM: # %bb.0: +; RV64IM-NEXT: srli a2, a0, 52 +; RV64IM-NEXT: slli a3, a1, 12 +; RV64IM-NEXT: or a2, a3, a2 +; RV64IM-NEXT: srli a3, a0, 56 +; RV64IM-NEXT: slli a1, a1, 8 +; RV64IM-NEXT: or a1, a1, a3 +; RV64IM-NEXT: sub a1, a1, a2 +; RV64IM-NEXT: slli a2, a0, 12 +; RV64IM-NEXT: slli a0, a0, 8 +; RV64IM-NEXT: sltu a3, a0, a2 +; RV64IM-NEXT: sub a1, a1, a3 +; RV64IM-NEXT: sub a0, a0, a2 +; RV64IM-NEXT: ret + %1 = mul i128 %a, -3840 + ret i128 %1 +} + +define i128 @muli128_m63(i128 %a) nounwind { +; RV32I-LABEL: muli128_m63: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -64 +; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 56(sp) # 4-byte Folded Spill +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a2, 4(a1) +; RV32I-NEXT: lw a4, 8(a1) +; RV32I-NEXT: lw a1, 12(a1) +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: addi a0, zero, -1 +; RV32I-NEXT: sw a0, 20(sp) +; RV32I-NEXT: sw a0, 16(sp) +; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: addi a0, zero, -63 +; RV32I-NEXT: sw a0, 8(sp) +; RV32I-NEXT: sw a1, 36(sp) +; RV32I-NEXT: sw a4, 32(sp) +; RV32I-NEXT: sw a2, 28(sp) +; RV32I-NEXT: addi a0, sp, 40 +; RV32I-NEXT: addi a1, sp, 24 +; RV32I-NEXT: addi a2, sp, 8 +; RV32I-NEXT: sw a3, 24(sp) +; RV32I-NEXT: call __multi3@plt +; RV32I-NEXT: lw a0, 52(sp) +; RV32I-NEXT: lw a1, 48(sp) +; RV32I-NEXT: lw a2, 
44(sp) +; RV32I-NEXT: lw a3, 40(sp) +; RV32I-NEXT: sw a0, 12(s0) +; RV32I-NEXT: sw a1, 8(s0) +; RV32I-NEXT: sw a2, 4(s0) +; RV32I-NEXT: sw a3, 0(s0) +; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: ret +; +; RV32IM-LABEL: muli128_m63: +; RV32IM: # %bb.0: +; RV32IM-NEXT: addi sp, sp, -64 +; RV32IM-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32IM-NEXT: sw s0, 56(sp) # 4-byte Folded Spill +; RV32IM-NEXT: lw a3, 0(a1) +; RV32IM-NEXT: lw a2, 4(a1) +; RV32IM-NEXT: lw a4, 8(a1) +; RV32IM-NEXT: lw a1, 12(a1) +; RV32IM-NEXT: mv s0, a0 +; RV32IM-NEXT: addi a0, zero, -1 +; RV32IM-NEXT: sw a0, 20(sp) +; RV32IM-NEXT: sw a0, 16(sp) +; RV32IM-NEXT: sw a0, 12(sp) +; RV32IM-NEXT: addi a0, zero, -63 +; RV32IM-NEXT: sw a0, 8(sp) +; RV32IM-NEXT: sw a1, 36(sp) +; RV32IM-NEXT: sw a4, 32(sp) +; RV32IM-NEXT: sw a2, 28(sp) +; RV32IM-NEXT: addi a0, sp, 40 +; RV32IM-NEXT: addi a1, sp, 24 +; RV32IM-NEXT: addi a2, sp, 8 +; RV32IM-NEXT: sw a3, 24(sp) +; RV32IM-NEXT: call __multi3@plt +; RV32IM-NEXT: lw a0, 52(sp) +; RV32IM-NEXT: lw a1, 48(sp) +; RV32IM-NEXT: lw a2, 44(sp) +; RV32IM-NEXT: lw a3, 40(sp) +; RV32IM-NEXT: sw a0, 12(s0) +; RV32IM-NEXT: sw a1, 8(s0) +; RV32IM-NEXT: sw a2, 4(s0) +; RV32IM-NEXT: sw a3, 0(s0) +; RV32IM-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32IM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32IM-NEXT: addi sp, sp, 64 +; RV32IM-NEXT: ret +; +; RV64I-LABEL: muli128_m63: +; RV64I: # %bb.0: +; RV64I-NEXT: slli a2, a0, 6 +; RV64I-NEXT: sltu a3, a0, a2 +; RV64I-NEXT: srli a4, a0, 58 +; RV64I-NEXT: slli a5, a1, 6 +; RV64I-NEXT: or a4, a5, a4 +; RV64I-NEXT: sub a1, a1, a4 +; RV64I-NEXT: sub a1, a1, a3 +; RV64I-NEXT: sub a0, a0, a2 +; RV64I-NEXT: ret +; +; RV64IM-LABEL: muli128_m63: +; RV64IM: # %bb.0: +; RV64IM-NEXT: slli a2, a0, 6 +; RV64IM-NEXT: sltu a3, a0, a2 +; RV64IM-NEXT: srli a4, a0, 58 +; RV64IM-NEXT: slli a5, a1, 6 +; RV64IM-NEXT: or a4, a5, a4 +; 
RV64IM-NEXT: sub a1, a1, a4 +; RV64IM-NEXT: sub a1, a1, a3 +; RV64IM-NEXT: sub a0, a0, a2 +; RV64IM-NEXT: ret + %1 = mul i128 %a, -63 + ret i128 %1 +}