diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -562,3 +562,413 @@
 %1 = mul i64 %a, -65
 ret i64 %1
 }
+
+define i32 @muli32_p384(i32 %a) nounwind {
+; RV32I-LABEL: muli32_p384:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi a1, zero, 384
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli32_p384:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: addi a1, zero, 384
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli32_p384:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: addi a1, zero, 384
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli32_p384:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: addi a1, zero, 384
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i32 %a, 384
+ ret i32 %1
+}
+
+define i32 @muli32_p12288(i32 %a) nounwind {
+; RV32I-LABEL: muli32_p12288:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a1, 3
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli32_p12288:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a1, 3
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli32_p12288:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 3
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli32_p12288:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 3
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i32 %a, 12288
+ ret i32 %1
+}
+
+define i32 @muli32_p4352(i32 %a) nounwind {
+; RV32I-LABEL: muli32_p4352:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a1, 1
+; RV32I-NEXT: addi a1, a1, 256
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli32_p4352:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a1, 1
+; RV32IM-NEXT: addi a1, a1, 256
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli32_p4352:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, 256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli32_p4352:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1
+; RV64IM-NEXT: addiw a1, a1, 256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i32 %a, 4352
+ ret i32 %1
+}
+
+define i32 @muli32_p3840(i32 %a) nounwind {
+; RV32I-LABEL: muli32_p3840:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a1, 1
+; RV32I-NEXT: addi a1, a1, -256
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli32_p3840:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a1, 1
+; RV32IM-NEXT: addi a1, a1, -256
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli32_p3840:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, -256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli32_p3840:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1
+; RV64IM-NEXT: addiw a1, a1, -256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i32 %a, 3840
+ ret i32 %1
+}
+
+define i32 @muli32_m3840(i32 %a) nounwind {
+; RV32I-LABEL: muli32_m3840:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a1, 1048575
+; RV32I-NEXT: addi a1, a1, 256
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli32_m3840:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a1, 1048575
+; RV32IM-NEXT: addi a1, a1, 256
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli32_m3840:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1048575
+; RV64I-NEXT: addiw a1, a1, 256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli32_m3840:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1048575
+; RV64IM-NEXT: addiw a1, a1, 256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i32 %a, -3840
+ ret i32 %1
+}
+
+define i32 @muli32_m4352(i32 %a) nounwind {
+; RV32I-LABEL: muli32_m4352:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a1, 1048575
+; RV32I-NEXT: addi a1, a1, -256
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli32_m4352:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a1, 1048575
+; RV32IM-NEXT: addi a1, a1, -256
+; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli32_m4352:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1048575
+; RV64I-NEXT: addiw a1, a1, -256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli32_m4352:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1048575
+; RV64IM-NEXT: addiw a1, a1, -256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i32 %a, -4352
+ ret i32 %1
+}
+
+define i64 @muli64_p4352(i64 %a) nounwind {
+; RV32I-LABEL: muli64_p4352:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a2, 1
+; RV32I-NEXT: addi a2, a2, 256
+; RV32I-NEXT: mv a3, zero
+; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli64_p4352:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a2, 1
+; RV32IM-NEXT: addi a2, a2, 256
+; RV32IM-NEXT: mul a1, a1, a2
+; RV32IM-NEXT: mulhu a3, a0, a2
+; RV32IM-NEXT: add a1, a3, a1
+; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli64_p4352:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, 256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli64_p4352:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1
+; RV64IM-NEXT: addiw a1, a1, 256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i64 %a, 4352
+ ret i64 %1
+}
+
+define i64 @muli64_p3840(i64 %a) nounwind {
+; RV32I-LABEL: muli64_p3840:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a2, 1
+; RV32I-NEXT: addi a2, a2, -256
+; RV32I-NEXT: mv a3, zero
+; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli64_p3840:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a2, 1
+; RV32IM-NEXT: addi a2, a2, -256
+; RV32IM-NEXT: mul a1, a1, a2
+; RV32IM-NEXT: mulhu a3, a0, a2
+; RV32IM-NEXT: add a1, a3, a1
+; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli64_p3840:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, -256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli64_p3840:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1
+; RV64IM-NEXT: addiw a1, a1, -256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i64 %a, 3840
+ ret i64 %1
+}
+
+define i64 @muli64_m4352(i64 %a) nounwind {
+; RV32I-LABEL: muli64_m4352:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a2, 1048575
+; RV32I-NEXT: addi a2, a2, -256
+; RV32I-NEXT: addi a3, zero, -1
+; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli64_m4352:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a2, 1048575
+; RV32IM-NEXT: addi a2, a2, -256
+; RV32IM-NEXT: mul a1, a1, a2
+; RV32IM-NEXT: mulhu a3, a0, a2
+; RV32IM-NEXT: sub a3, a3, a0
+; RV32IM-NEXT: add a1, a3, a1
+; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli64_m4352:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1048575
+; RV64I-NEXT: addiw a1, a1, -256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli64_m4352:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1048575
+; RV64IM-NEXT: addiw a1, a1, -256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i64 %a, -4352
+ ret i64 %1
+}
+
+define i64 @muli64_m3840(i64 %a) nounwind {
+; RV32I-LABEL: muli64_m3840:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lui a2, 1048575
+; RV32I-NEXT: addi a2, a2, 256
+; RV32I-NEXT: addi a3, zero, -1
+; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IM-LABEL: muli64_m3840:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: lui a2, 1048575
+; RV32IM-NEXT: addi a2, a2, 256
+; RV32IM-NEXT: mul a1, a1, a2
+; RV32IM-NEXT: mulhu a3, a0, a2
+; RV32IM-NEXT: sub a3, a3, a0
+; RV32IM-NEXT: add a1, a3, a1
+; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: muli64_m3840:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: lui a1, 1048575
+; RV64I-NEXT: addiw a1, a1, 256
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: muli64_m3840:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1048575
+; RV64IM-NEXT: addiw a1, a1, 256
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
+ %1 = mul i64 %a, -3840
+ ret i64 %1
+}