diff --git a/llvm/test/CodeGen/RISCV/add-before-mul.ll b/llvm/test/CodeGen/RISCV/add-before-mul.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add-before-mul.ll
@@ -0,0 +1,177 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+
+define i32 @add_mul_small_imm(i32 %0, i32 %1, i32 %2) {
+; RV32I-LABEL: add_mul_small_imm:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    addi a1, zero, 9
+; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    addi a0, a0, 27
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add_mul_small_imm:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    .cfi_def_cfa_offset 16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    addi a1, zero, 9
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    addi a0, a0, 27
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %tmp1 = add i32 %0, 3
+  %tmp3 = mul i32 %tmp1, 9
+  ret i32 %tmp3
+}
+
+define i32 @add_mul_large_imm(i32 %0, i32 %1, i32 %2) {
+; RV32I-LABEL: add_mul_large_imm:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    lui a1, 6
+; RV32I-NEXT:    addi a1, a1, -2048
+; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    lui a1, 17
+; RV32I-NEXT:    addi a1, a1, -2048
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add_mul_large_imm:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    .cfi_def_cfa_offset 16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    lui a1, 6
+; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    lui a1, 17
+; RV64I-NEXT:    addiw a1, a1, -2048
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %tmp1 = add i32 %0, 3
+  %tmp3 = mul i32 %tmp1, 22528
+  ret i32 %tmp3
+}
+
+
+define i32 @add_mul_large_imm_bitcast(i32 %in) {
+; RV32I-LABEL: add_mul_large_imm_bitcast:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    sw s1, 4(sp)
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    .cfi_offset s0, -8
+; RV32I-NEXT:    .cfi_offset s1, -12
+; RV32I-NEXT:    lui a1, 912092
+; RV32I-NEXT:    addi s0, a1, -273
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    mv a0, s0
+; RV32I-NEXT:    mv a1, s0
+; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    add a0, s1, a0
+; RV32I-NEXT:    lw s1, 4(sp)
+; RV32I-NEXT:    lw s0, 8(sp)
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add_mul_large_imm_bitcast:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    .cfi_def_cfa_offset 32
+; RV64I-NEXT:    sd ra, 24(sp)
+; RV64I-NEXT:    sd s0, 16(sp)
+; RV64I-NEXT:    sd s1, 8(sp)
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    .cfi_offset s0, -16
+; RV64I-NEXT:    .cfi_offset s1, -24
+; RV64I-NEXT:    lui a1, 912092
+; RV64I-NEXT:    addiw s0, a1, -273
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    mv s1, a0
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    mv a1, s0
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    addw a0, s1, a0
+; RV64I-NEXT:    ld s1, 8(sp)
+; RV64I-NEXT:    ld s0, 16(sp)
+; RV64I-NEXT:    ld ra, 24(sp)
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+  %const = bitcast i32 3735928559 to i32
+  %add = add i32 %in, %const
+  %mul = mul i32 %add, %const
+  ret i32 %mul
+}
+
+define i32 @add_mul_small_imm_bitcast(i32 %in) {
+; RV32I-LABEL: add_mul_small_imm_bitcast:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp)
+; RV32I-NEXT:    sw s0, 8(sp)
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    .cfi_offset s0, -8
+; RV32I-NEXT:    addi a1, zero, 24
+; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    mv s0, a0
+; RV32I-NEXT:    addi a0, zero, 24
+; RV32I-NEXT:    addi a1, zero, 24
+; RV32I-NEXT:    call __mulsi3
+; RV32I-NEXT:    add a0, s0, a0
+; RV32I-NEXT:    lw s0, 8(sp)
+; RV32I-NEXT:    lw ra, 12(sp)
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add_mul_small_imm_bitcast:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    .cfi_def_cfa_offset 16
+; RV64I-NEXT:    sd ra, 8(sp)
+; RV64I-NEXT:    sd s0, 0(sp)
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    .cfi_offset s0, -16
+; RV64I-NEXT:    addi a1, zero, 24
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    mv s0, a0
+; RV64I-NEXT:    addi a0, zero, 24
+; RV64I-NEXT:    addi a1, zero, 24
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    addw a0, s0, a0
+; RV64I-NEXT:    ld s0, 0(sp)
+; RV64I-NEXT:    ld ra, 8(sp)
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %const = bitcast i32 24 to i32
+  %add = add i32 %in, %const
+  %mul = mul i32 %add, 24
+  ret i32 %mul
+}