diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -1580,3 +1580,55 @@
   %r = and i8 %a, 15
   ret i8 %r
 }
+
+define i8 @mulsub(i8 %x, i8 %y) nounwind {
+; RV32I-LABEL: mulsub:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a1
+; RV32I-NEXT:    li a1, 14
+; RV32I-NEXT:    call __mulsi3@plt
+; RV32I-NEXT:    sub a0, s0, a0
+; RV32I-NEXT:    andi a0, a0, 15
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: mulsub:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 14
+; RV32IM-NEXT:    mul a0, a0, a2
+; RV32IM-NEXT:    sub a0, a1, a0
+; RV32IM-NEXT:    andi a0, a0, 15
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: mulsub:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    li a1, 14
+; RV64I-NEXT:    call __muldi3@plt
+; RV64I-NEXT:    subw a0, s0, a0
+; RV64I-NEXT:    andi a0, a0, 15
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: mulsub:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    li a2, 14
+; RV64IM-NEXT:    mulw a0, a0, a2
+; RV64IM-NEXT:    subw a0, a1, a0
+; RV64IM-NEXT:    andi a0, a0, 15
+; RV64IM-NEXT:    ret
+  %m = mul i8 %x, 14
+  %a = sub i8 %y, %m
+  %r = and i8 %a, 15
+  ret i8 %r
+}