Index: llvm/test/CodeGen/RISCV/rv32zba.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv32zba.ll
+++ llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-b -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32IB
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zba -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-zba -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32IBA
 
 define signext i16 @sh1add(i64 %0, i16* %1) {
@@ -80,3 +80,81 @@
   %4 = load i64, i64* %3
   ret i64 %4
 }
+
+define i32 @addmul6(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul6:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 6
+; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: addmul6:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a2, zero, 6
+; RV32IB-NEXT:    mul a0, a0, a2
+; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBA-LABEL: addmul6:
+; RV32IBA:       # %bb.0:
+; RV32IBA-NEXT:    addi a2, zero, 6
+; RV32IBA-NEXT:    mul a0, a0, a2
+; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    ret
+  %c = mul i32 %a, 6
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmul10(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul10:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 10
+; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: addmul10:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a2, zero, 10
+; RV32IB-NEXT:    mul a0, a0, a2
+; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBA-LABEL: addmul10:
+; RV32IBA:       # %bb.0:
+; RV32IBA-NEXT:    addi a2, zero, 10
+; RV32IBA-NEXT:    mul a0, a0, a2
+; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    ret
+  %c = mul i32 %a, 10
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmul12(i32 %a, i32 %b) {
+; RV32I-LABEL: addmul12:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi a2, zero, 12
+; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32IB-LABEL: addmul12:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    addi a2, zero, 12
+; RV32IB-NEXT:    mul a0, a0, a2
+; RV32IB-NEXT:    add a0, a0, a1
+; RV32IB-NEXT:    ret
+;
+; RV32IBA-LABEL: addmul12:
+; RV32IBA:       # %bb.0:
+; RV32IBA-NEXT:    addi a2, zero, 12
+; RV32IBA-NEXT:    mul a0, a0, a2
+; RV32IBA-NEXT:    add a0, a0, a1
+; RV32IBA-NEXT:    ret
+  %c = mul i32 %a, 12
+  %d = add i32 %c, %b
+  ret i32 %d
+}
Index: llvm/test/CodeGen/RISCV/rv64zba.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rv64zba.ll
+++ llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64I
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-b -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64IB
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zba -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-zba -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV64IBA
 
 define i64 @slliuw(i64 %a) nounwind {
@@ -370,3 +370,159 @@
   %5 = add i64 %4, %1
   ret i64 %5
 }
+
+define i64 @addmul6(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul6:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 6
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmul6:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 6
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmul6:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 6
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i64 %a, 6
+  %d = add i64 %c, %b
+  ret i64 %d
+}
+
+define i64 @addmul10(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul10:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 10
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmul10:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 10
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmul10:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 10
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i64 %a, 10
+  %d = add i64 %c, %b
+  ret i64 %d
+}
+
+define i64 @addmul12(i64 %a, i64 %b) {
+; RV64I-LABEL: addmul12:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 12
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmul12:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 12
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    add a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmul12:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 12
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    add a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i64 %a, 12
+  %d = add i64 %c, %b
+  ret i64 %d
+}
+
+define i32 @addmulw6(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw6:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 6
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmulw6:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 6
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    addw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmulw6:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 6
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    addw a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i32 %a, 6
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmulw10(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw10:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 10
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmulw10:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 10
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    addw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmulw10:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 10
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    addw a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i32 %a, 10
+  %d = add i32 %c, %b
+  ret i32 %d
+}
+
+define i32 @addmulw12(i32 signext %a, i32 signext %b) {
+; RV64I-LABEL: addmulw12:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a2, zero, 12
+; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64IB-LABEL: addmulw12:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    addi a2, zero, 12
+; RV64IB-NEXT:    mul a0, a0, a2
+; RV64IB-NEXT:    addw a0, a0, a1
+; RV64IB-NEXT:    ret
+;
+; RV64IBA-LABEL: addmulw12:
+; RV64IBA:       # %bb.0:
+; RV64IBA-NEXT:    addi a2, zero, 12
+; RV64IBA-NEXT:    mul a0, a0, a2
+; RV64IBA-NEXT:    addw a0, a0, a1
+; RV64IBA-NEXT:    ret
+  %c = mul i32 %a, 12
+  %d = add i32 %c, %b
+  ret i32 %d
+}