Index: llvm/test/CodeGen/RISCV/rv32zba.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zba.ll
+++ llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -80,3 +80,245 @@
 %4 = load i64, i64* %3
 ret i64 %4
 }
+
+define i32 @addmul5(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul5:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a2, a0, 2
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul5:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sh2add a0, a0, a0
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul5:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: sh2add a0, a0, a0
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 5
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul6(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul6:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: mv s0, a1
+; RV32I-NEXT: addi a1, zero, 6
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: add a0, a0, s0
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul6:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi sp, sp, -16
+; RV32IB-NEXT: .cfi_def_cfa_offset 16
+; RV32IB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IB-NEXT: .cfi_offset ra, -4
+; RV32IB-NEXT: .cfi_offset s0, -8
+; RV32IB-NEXT: mv s0, a1
+; RV32IB-NEXT: addi a1, zero, 6
+; RV32IB-NEXT: call __mulsi3@plt
+; RV32IB-NEXT: add a0, a0, s0
+; RV32IB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IB-NEXT: addi sp, sp, 16
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul6:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi sp, sp, -16
+; RV32IBA-NEXT: .cfi_def_cfa_offset 16
+; RV32IBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IBA-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IBA-NEXT: .cfi_offset ra, -4
+; RV32IBA-NEXT: .cfi_offset s0, -8
+; RV32IBA-NEXT: mv s0, a1
+; RV32IBA-NEXT: addi a1, zero, 6
+; RV32IBA-NEXT: call __mulsi3@plt
+; RV32IBA-NEXT: add a0, a0, s0
+; RV32IBA-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IBA-NEXT: addi sp, sp, 16
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 6
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul7(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul7:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a2, a0, 3
+; RV32I-NEXT: sub a0, a2, a0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul7:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: slli a2, a0, 3
+; RV32IB-NEXT: sub a0, a2, a0
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul7:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: slli a2, a0, 3
+; RV32IBA-NEXT: sub a0, a2, a0
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 7
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul9(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul9:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a2, a0, 3
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul9:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sh3add a0, a0, a0
+; RV32IB-NEXT: add a0, a0, a1
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul9:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: sh3add a0, a0, a0
+; RV32IBA-NEXT: add a0, a0, a1
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 9
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul10(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul10:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: mv s0, a1
+; RV32I-NEXT: addi a1, zero, 10
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: add a0, a0, s0
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul10:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi sp, sp, -16
+; RV32IB-NEXT: .cfi_def_cfa_offset 16
+; RV32IB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IB-NEXT: .cfi_offset ra, -4
+; RV32IB-NEXT: .cfi_offset s0, -8
+; RV32IB-NEXT: mv s0, a1
+; RV32IB-NEXT: addi a1, zero, 10
+; RV32IB-NEXT: call __mulsi3@plt
+; RV32IB-NEXT: add a0, a0, s0
+; RV32IB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IB-NEXT: addi sp, sp, 16
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul10:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi sp, sp, -16
+; RV32IBA-NEXT: .cfi_def_cfa_offset 16
+; RV32IBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IBA-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IBA-NEXT: .cfi_offset ra, -4
+; RV32IBA-NEXT: .cfi_offset s0, -8
+; RV32IBA-NEXT: mv s0, a1
+; RV32IBA-NEXT: addi a1, zero, 10
+; RV32IBA-NEXT: call __mulsi3@plt
+; RV32IBA-NEXT: add a0, a0, s0
+; RV32IBA-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IBA-NEXT: addi sp, sp, 16
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 10
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmul12(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul12:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: .cfi_def_cfa_offset 16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: mv s0, a1
+; RV32I-NEXT: addi a1, zero, 12
+; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: add a0, a0, s0
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: addmul12:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: addi sp, sp, -16
+; RV32IB-NEXT: .cfi_def_cfa_offset 16
+; RV32IB-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IB-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IB-NEXT: .cfi_offset ra, -4
+; RV32IB-NEXT: .cfi_offset s0, -8
+; RV32IB-NEXT: mv s0, a1
+; RV32IB-NEXT: addi a1, zero, 12
+; RV32IB-NEXT: call __mulsi3@plt
+; RV32IB-NEXT: add a0, a0, s0
+; RV32IB-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IB-NEXT: addi sp, sp, 16
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: addmul12:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: addi sp, sp, -16
+; RV32IBA-NEXT: .cfi_def_cfa_offset 16
+; RV32IBA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IBA-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IBA-NEXT: .cfi_offset ra, -4
+; RV32IBA-NEXT: .cfi_offset s0, -8
+; RV32IBA-NEXT: mv s0, a1
+; RV32IBA-NEXT: addi a1, zero, 12
+; RV32IBA-NEXT: call __mulsi3@plt
+; RV32IBA-NEXT: add a0, a0, s0
+; RV32IBA-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IBA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IBA-NEXT: addi sp, sp, 16
+; RV32IBA-NEXT: ret
+ %c = mul i32 %a, 12
+ %d = add i32 %c, %b
+ ret i32 %d
+}
Index: llvm/test/CodeGen/RISCV/rv64zba.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zba.ll
+++ llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -370,3 +370,487 @@
 %5 = add i64 %4, %1
 ret i64 %5
 }
+
+define i64 @addmul5(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul5:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul5:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh2add a0, a0, a0
+; RV64IB-NEXT: add a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul5:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh2add a0, a0, a0
+; RV64IBA-NEXT: add a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 5
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul6(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul6:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: addi a1, zero, 6
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: add a0, a0, s0
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul6:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi sp, sp, -16
+; RV64IB-NEXT: .cfi_def_cfa_offset 16
+; RV64IB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: .cfi_offset ra, -8
+; RV64IB-NEXT: .cfi_offset s0, -16
+; RV64IB-NEXT: mv s0, a1
+; RV64IB-NEXT: addi a1, zero, 6
+; RV64IB-NEXT: call __muldi3@plt
+; RV64IB-NEXT: add a0, a0, s0
+; RV64IB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: addi sp, sp, 16
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul6:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi sp, sp, -16
+; RV64IBA-NEXT: .cfi_def_cfa_offset 16
+; RV64IBA-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: .cfi_offset ra, -8
+; RV64IBA-NEXT: .cfi_offset s0, -16
+; RV64IBA-NEXT: mv s0, a1
+; RV64IBA-NEXT: addi a1, zero, 6
+; RV64IBA-NEXT: call __muldi3@plt
+; RV64IBA-NEXT: add a0, a0, s0
+; RV64IBA-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: addi sp, sp, 16
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 6
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul7(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul7:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul7:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: slli a2, a0, 3
+; RV64IB-NEXT: sub a0, a2, a0
+; RV64IB-NEXT: add a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul7:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: slli a2, a0, 3
+; RV64IBA-NEXT: sub a0, a2, a0
+; RV64IBA-NEXT: add a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 7
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul9(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul9:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul9:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh3add a0, a0, a0
+; RV64IB-NEXT: add a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul9:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh3add a0, a0, a0
+; RV64IBA-NEXT: add a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 9
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul10(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul10:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: addi a1, zero, 10
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: add a0, a0, s0
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul10:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi sp, sp, -16
+; RV64IB-NEXT: .cfi_def_cfa_offset 16
+; RV64IB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: .cfi_offset ra, -8
+; RV64IB-NEXT: .cfi_offset s0, -16
+; RV64IB-NEXT: mv s0, a1
+; RV64IB-NEXT: addi a1, zero, 10
+; RV64IB-NEXT: call __muldi3@plt
+; RV64IB-NEXT: add a0, a0, s0
+; RV64IB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: addi sp, sp, 16
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul10:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi sp, sp, -16
+; RV64IBA-NEXT: .cfi_def_cfa_offset 16
+; RV64IBA-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: .cfi_offset ra, -8
+; RV64IBA-NEXT: .cfi_offset s0, -16
+; RV64IBA-NEXT: mv s0, a1
+; RV64IBA-NEXT: addi a1, zero, 10
+; RV64IBA-NEXT: call __muldi3@plt
+; RV64IBA-NEXT: add a0, a0, s0
+; RV64IBA-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: addi sp, sp, 16
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 10
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i64 @addmul12(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul12:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: addi a1, zero, 12
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: add a0, a0, s0
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmul12:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi sp, sp, -16
+; RV64IB-NEXT: .cfi_def_cfa_offset 16
+; RV64IB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: .cfi_offset ra, -8
+; RV64IB-NEXT: .cfi_offset s0, -16
+; RV64IB-NEXT: mv s0, a1
+; RV64IB-NEXT: addi a1, zero, 12
+; RV64IB-NEXT: call __muldi3@plt
+; RV64IB-NEXT: add a0, a0, s0
+; RV64IB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: addi sp, sp, 16
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmul12:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi sp, sp, -16
+; RV64IBA-NEXT: .cfi_def_cfa_offset 16
+; RV64IBA-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: .cfi_offset ra, -8
+; RV64IBA-NEXT: .cfi_offset s0, -16
+; RV64IBA-NEXT: mv s0, a1
+; RV64IBA-NEXT: addi a1, zero, 12
+; RV64IBA-NEXT: call __muldi3@plt
+; RV64IBA-NEXT: add a0, a0, s0
+; RV64IBA-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: addi sp, sp, 16
+; RV64IBA-NEXT: ret
+ %c = mul i64 %a, 12
+ %d = add i64 %c, %b
+ ret i64 %d
+}
+
+define i32 @addmulw5(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw5:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmulw5:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh2add a0, a0, a0
+; RV64IB-NEXT: addw a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmulw5:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh2add a0, a0, a0
+; RV64IBA-NEXT: addw a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i32 %a, 5
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmulw6(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw6:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: addi a1, zero, 6
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: addw a0, a0, s0
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmulw6:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi sp, sp, -16
+; RV64IB-NEXT: .cfi_def_cfa_offset 16
+; RV64IB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: .cfi_offset ra, -8
+; RV64IB-NEXT: .cfi_offset s0, -16
+; RV64IB-NEXT: mv s0, a1
+; RV64IB-NEXT: addi a1, zero, 6
+; RV64IB-NEXT: call __muldi3@plt
+; RV64IB-NEXT: addw a0, a0, s0
+; RV64IB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: addi sp, sp, 16
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmulw6:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi sp, sp, -16
+; RV64IBA-NEXT: .cfi_def_cfa_offset 16
+; RV64IBA-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: .cfi_offset ra, -8
+; RV64IBA-NEXT: .cfi_offset s0, -16
+; RV64IBA-NEXT: mv s0, a1
+; RV64IBA-NEXT: addi a1, zero, 6
+; RV64IBA-NEXT: call __muldi3@plt
+; RV64IBA-NEXT: addw a0, a0, s0
+; RV64IBA-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: addi sp, sp, 16
+; RV64IBA-NEXT: ret
+ %c = mul i32 %a, 6
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmulw7(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw7:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmulw7:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: slli a2, a0, 3
+; RV64IB-NEXT: sub a0, a2, a0
+; RV64IB-NEXT: addw a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmulw7:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: slli a2, a0, 3
+; RV64IBA-NEXT: sub a0, a2, a0
+; RV64IBA-NEXT: addw a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i32 %a, 7
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmulw9(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw9:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: addw a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmulw9:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh3add a0, a0, a0
+; RV64IB-NEXT: addw a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmulw9:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh3add a0, a0, a0
+; RV64IBA-NEXT: addw a0, a0, a1
+; RV64IBA-NEXT: ret
+ %c = mul i32 %a, 9
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmulw10(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw10:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: addi a1, zero, 10
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: addw a0, a0, s0
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmulw10:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi sp, sp, -16
+; RV64IB-NEXT: .cfi_def_cfa_offset 16
+; RV64IB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: .cfi_offset ra, -8
+; RV64IB-NEXT: .cfi_offset s0, -16
+; RV64IB-NEXT: mv s0, a1
+; RV64IB-NEXT: addi a1, zero, 10
+; RV64IB-NEXT: call __muldi3@plt
+; RV64IB-NEXT: addw a0, a0, s0
+; RV64IB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: addi sp, sp, 16
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmulw10:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi sp, sp, -16
+; RV64IBA-NEXT: .cfi_def_cfa_offset 16
+; RV64IBA-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: .cfi_offset ra, -8
+; RV64IBA-NEXT: .cfi_offset s0, -16
+; RV64IBA-NEXT: mv s0, a1
+; RV64IBA-NEXT: addi a1, zero, 10
+; RV64IBA-NEXT: call __muldi3@plt
+; RV64IBA-NEXT: addw a0, a0, s0
+; RV64IBA-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: addi sp, sp, 16
+; RV64IBA-NEXT: ret
+ %c = mul i32 %a, 10
+ %d = add i32 %c, %b
+ ret i32 %d
+}
+
+define i32 @addmulw12(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw12:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: .cfi_def_cfa_offset 16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: .cfi_offset s0, -16
+; RV64I-NEXT: mv s0, a1
+; RV64I-NEXT: addi a1, zero, 12
+; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: addw a0, a0, s0
+; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: addmulw12:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: addi sp, sp, -16
+; RV64IB-NEXT: .cfi_def_cfa_offset 16
+; RV64IB-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IB-NEXT: .cfi_offset ra, -8
+; RV64IB-NEXT: .cfi_offset s0, -16
+; RV64IB-NEXT: mv s0, a1
+; RV64IB-NEXT: addi a1, zero, 12
+; RV64IB-NEXT: call __muldi3@plt
+; RV64IB-NEXT: addw a0, a0, s0
+; RV64IB-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IB-NEXT: addi sp, sp, 16
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: addmulw12:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: addi sp, sp, -16
+; RV64IBA-NEXT: .cfi_def_cfa_offset 16
+; RV64IBA-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IBA-NEXT: .cfi_offset ra, -8
+; RV64IBA-NEXT: .cfi_offset s0, -16
+; RV64IBA-NEXT: mv s0, a1
+; RV64IBA-NEXT: addi a1, zero, 12
+; RV64IBA-NEXT: call __muldi3@plt
+; RV64IBA-NEXT: addw a0, a0, s0
+; RV64IBA-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IBA-NEXT: addi sp, sp, 16
+; RV64IBA-NEXT: ret
+ %c = mul i32 %a, 12
+ %d = add i32 %c, %b
+ ret i32 %d
+}
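
Note on the pattern under test (not part of the patch): with Zba, sh2add rd, rs1, rs2 computes (rs1 << 2) + rs2 and sh3add computes (rs1 << 3) + rs2, so multiply by 2^k + 1 followed by an add folds into a single shNadd plus an add, which is what the *IB/*IBA check lines above expect for the 5 and 9 cases. A minimal sketch of the addmul5 case, with the function name chosen here only for illustration:

; a*5 + b == ((a << 2) + a) + b, so one sh2add plus one add suffices.
define i32 @addmul5_sketch(i32 %a, i32 %b) {
  %c = mul i32 %a, 5
  %d = add i32 %c, %b
  ret i32 %d
}
; Expected Zba lowering, taken from the RV32IB checks above:
;   sh2add a0, a0, a0
;   add a0, a0, a1
;   ret

The even multiples (6, 10, 12) still lower to a __mulsi3/__muldi3 libcall in these checks, presumably recorded here as a baseline for a later codegen improvement.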