Index: lib/Target/RISCV/RISCVInstrInfoM.td
===================================================================
--- lib/Target/RISCV/RISCVInstrInfoM.td
+++ lib/Target/RISCV/RISCVInstrInfoM.td
@@ -49,3 +49,17 @@
 def : PatGprGpr<srem, REM>;
 def : PatGprGpr<urem, REMU>;
 } // Predicates = [HasStdExtM]
+
+let Predicates = [HasStdExtM, IsRV64] in {
+def : Pat<(sext_inreg (mul GPR:$rs1, GPR:$rs2), i32),
+          (MULW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (udiv (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)), i32),
+          (DIVUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(udiv (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)),
+          (DIVUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sdiv (sext_inreg GPR:$rs1, i32), (sext_inreg GPR:$rs2, i32)),
+          (DIVW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (sdiv (sext_inreg GPR:$rs1, i32),
+                            (sext_inreg GPR:$rs2, i32)), i32),
+          (DIVW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtM, IsRV64]
Index: test/CodeGen/RISCV/div.ll
===================================================================
--- test/CodeGen/RISCV/div.ll
+++ test/CodeGen/RISCV/div.ll
@@ -3,6 +3,10 @@
 ; RUN: | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN: | FileCheck -check-prefix=RV32IM %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IM %s
 
 define i32 @udiv(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: udiv:
@@ -18,6 +22,24 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: divu a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: udiv:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: call __udivdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: udiv:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: divuw a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = udiv i32 %a, %b
   ret i32 %1
 }
@@ -40,6 +62,34 @@
 ; RV32IM-NEXT: mulhu a0, a0, a1
 ; RV32IM-NEXT: srli a0, a0, 2
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: udiv_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __udivdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: udiv_constant:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: slli a0, a0, 32
+; RV64IM-NEXT: srli a0, a0, 32
+; RV64IM-NEXT: lui a1, 1035469
+; RV64IM-NEXT: addiw a1, a1, -819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, -819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, -819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, -819
+; RV64IM-NEXT: mulhu a0, a0, a1
+; RV64IM-NEXT: srli a0, a0, 2
+; RV64IM-NEXT: ret
   %1 = udiv i32 %a, 5
   ret i32 %1
 }
@@ -54,6 +104,16 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: srli a0, a0, 3
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: udiv_pow2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a0, a0, 3
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: udiv_pow2:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: srliw a0, a0, 3
+; RV64IM-NEXT: ret
   %1 = udiv i32 %a, 8
   ret i32 %1
 }
@@ -76,6 +136,20 @@
 ; RV32IM-NEXT: lw ra, 12(sp)
 ; RV32IM-NEXT: addi sp, sp, 16
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: udiv64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: call __udivdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: udiv64:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: divu a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = udiv i64 %a, %b
   ret i64 %1
 }
@@ -102,6 +176,30 @@
 ; RV32IM-NEXT: lw ra, 12(sp)
 ; RV32IM-NEXT: addi sp, sp, 16
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: udiv64_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __udivdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: udiv64_constant:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 1035469
+; RV64IM-NEXT: addiw a1, a1, -819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, -819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, -819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, -819
+; RV64IM-NEXT: mulhu a0, a0, a1
+; RV64IM-NEXT: srli a0, a0, 2
+; RV64IM-NEXT: ret
   %1 = udiv i64 %a, 5
   ret i64 %1
 }
@@ -120,6 +218,22 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: div a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: sdiv:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: call __divdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: sdiv:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: divw a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = sdiv i32 %a, %b
   ret i32 %1
 }
@@ -144,6 +258,34 @@
 ; RV32IM-NEXT: srai a0, a0, 1
 ; RV32IM-NEXT: add a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: sdiv_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __divdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: sdiv_constant:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a0, a0
+; RV64IM-NEXT: lui a1, 13107
+; RV64IM-NEXT: addiw a1, a1, 819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, 819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, 819
+; RV64IM-NEXT: slli a1, a1, 13
+; RV64IM-NEXT: addi a1, a1, 1639
+; RV64IM-NEXT: mulh a0, a0, a1
+; RV64IM-NEXT: srli a1, a0, 63
+; RV64IM-NEXT: srai a0, a0, 1
+; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = sdiv i32 %a, 5
   ret i32 %1
 }
@@ -164,6 +306,24 @@
 ; RV32IM-NEXT: add a0, a0, a1
 ; RV32IM-NEXT: srai a0, a0, 3
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: sdiv_pow2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sext.w a1, a0
+; RV64I-NEXT: srli a1, a1, 60
+; RV64I-NEXT: andi a1, a1, 7
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: sraiw a0, a0, 3
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: sdiv_pow2:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a1, a0
+; RV64IM-NEXT: srli a1, a1, 60
+; RV64IM-NEXT: andi a1, a1, 7
+; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: sraiw a0, a0, 3
+; RV64IM-NEXT: ret
   %1 = sdiv i32 %a, 8
   ret i32 %1
 }
@@ -186,6 +346,20 @@
 ; RV32IM-NEXT: lw ra, 12(sp)
 ; RV32IM-NEXT: addi sp, sp, 16
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: sdiv64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: call __divdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: sdiv64:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: div a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = sdiv i64 %a, %b
   ret i64 %1
 }
@@ -212,6 +386,32 @@
 ; RV32IM-NEXT: lw ra, 12(sp)
 ; RV32IM-NEXT: addi sp, sp, 16
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: sdiv64_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __divdi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: sdiv64_constant:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: lui a1, 13107
+; RV64IM-NEXT: addiw a1, a1, 819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, 819
+; RV64IM-NEXT: slli a1, a1, 12
+; RV64IM-NEXT: addi a1, a1, 819
+; RV64IM-NEXT: slli a1, a1, 13
+; RV64IM-NEXT: addi a1, a1, 1639
+; RV64IM-NEXT: mulh a0, a0, a1
+; RV64IM-NEXT: srli a1, a0, 63
+; RV64IM-NEXT: srai a0, a0, 1
+; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = sdiv i64 %a, 5
   ret i64 %1
 }
Index: test/CodeGen/RISCV/mul.ll
===================================================================
--- test/CodeGen/RISCV/mul.ll
+++ test/CodeGen/RISCV/mul.ll
@@ -3,8 +3,12 @@
 ; RUN: | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN: | FileCheck -check-prefix=RV32IM %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IM %s
 
-define i32 @square(i32 %a) nounwind {
+define signext i32 @square(i32 %a) nounwind {
 ; RV32I-LABEL: square:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -19,11 +23,27 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: mul a0, a0, a0
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: square:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: mv a1, a0
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: square:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mulw a0, a0, a0
+; RV64IM-NEXT: ret
   %1 = mul i32 %a, %a
   ret i32 %1
 }
 
-define i32 @mul(i32 %a, i32 %b) nounwind {
+define signext i32 @mul(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: mul:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -37,11 +57,26 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: mul a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mul:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mul:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = mul i32 %a, %b
   ret i32 %1
 }
 
-define i32 @mul_constant(i32 %a) nounwind {
+define signext i32 @mul_constant(i32 %a) nounwind {
 ; RV32I-LABEL: mul_constant:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -57,6 +92,23 @@
 ; RV32IM-NEXT: addi a1, zero, 5
 ; RV32IM-NEXT: mul a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mul_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mul_constant:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: addi a1, zero, 5
+; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = mul i32 %a, 5
   ret i32 %1
 }
@@ -71,6 +123,16 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: slli a0, a0, 3
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mul_pow2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mul_pow2:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: slli a0, a0, 3
+; RV64IM-NEXT: ret
   %1 = mul i32 %a, 8
   ret i32 %1
 }
@@ -94,6 +156,20 @@
 ; RV32IM-NEXT: add a1, a3, a1
 ; RV32IM-NEXT: mul a0, a0, a2
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mul64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mul64:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = mul i64 %a, %b
   ret i64 %1
 }
@@ -118,6 +194,22 @@
 ; RV32IM-NEXT: add a1, a3, a1
 ; RV32IM-NEXT: mul a0, a0, a2
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mul64_constant:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: addi a1, zero, 5
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mul64_constant:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: addi a1, zero, 5
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = mul i64 %a, 5
   ret i64 %1
 }
@@ -140,6 +232,26 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: mulh a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mulhs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mulhs:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a1, a1
+; RV64IM-NEXT: sext.w a0, a0
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: srli a0, a0, 32
+; RV64IM-NEXT: ret
   %1 = sext i32 %a to i64
   %2 = sext i32 %b to i64
   %3 = mul i64 %1, %2
@@ -148,7 +260,7 @@
   ret i32 %5
 }
 
-define i32 @mulhu(i32 %a, i32 %b) nounwind {
+define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV32I-LABEL: mulhu:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -166,6 +278,22 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: mulhu a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: mulhu:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: call __muldi3
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: mulhu:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: srli a0, a0, 32
+; RV64IM-NEXT: ret
   %1 = zext i32 %a to i64
   %2 = zext i32 %b to i64
   %3 = mul i64 %1, %2
Index: test/CodeGen/RISCV/rem.ll
===================================================================
--- test/CodeGen/RISCV/rem.ll
+++ test/CodeGen/RISCV/rem.ll
@@ -3,6 +3,10 @@
 ; RUN: | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
 ; RUN: | FileCheck -check-prefix=RV32IM %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IM %s
 
 define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: urem:
@@ -18,6 +22,28 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: remu a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: urem:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: call __umoddi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: urem:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: slli a1, a1, 32
+; RV64IM-NEXT: srli a1, a1, 32
+; RV64IM-NEXT: slli a0, a0, 32
+; RV64IM-NEXT: srli a0, a0, 32
+; RV64IM-NEXT: remu a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = urem i32 %a, %b
   ret i32 %1
 }
@@ -36,6 +62,24 @@
 ; RV32IM: # %bb.0:
 ; RV32IM-NEXT: rem a0, a0, a1
 ; RV32IM-NEXT: ret
+;
+; RV64I-LABEL: srem:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp)
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: call __moddi3
+; RV64I-NEXT: ld ra, 8(sp)
+; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+;
+; RV64IM-LABEL: srem:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: sext.w a1, a1
+; RV64IM-NEXT: sext.w a0, a0
+; RV64IM-NEXT: rem a0, a0, a1
+; RV64IM-NEXT: ret
   %1 = srem i32 %a, %b
   ret i32 %1
 }