diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -54,6 +54,8 @@ return selectShiftMask(N, 32, ShAmt); } + bool selectSExti32(SDValue N, SDValue &Val); + bool MatchSRLIW(SDNode *N) const; bool MatchSLLIUW(SDNode *N) const; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -986,6 +986,27 @@ return true; } +bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) { + if (N.getOpcode() == ISD::SIGN_EXTEND_INREG && + cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) { + Val = N.getOperand(0); + return true; + } + // FIXME: Should we just call computeNumSignBits here? + if (N.getOpcode() == ISD::AssertSext && + cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) { + Val = N; + return true; + } + if (N.getOpcode() == ISD::AssertZext && + cast<VTSDNode>(N->getOperand(1))->getVT().bitsLT(MVT::i32)) { + Val = N; + return true; + } + + return false; +} + // Match (srl (and val, mask), imm) where the result would be a // zero-extended 32-bit integer. i.e. 
the mask is 0xffffffff or the result // is equivalent to this (SimplifyDemandedBits may have removed lower bits diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -848,9 +848,7 @@ def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{ return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32); }]>; -def sexti32 : PatFrags<(ops node:$src), - [(sext_inreg node:$src, i32), - (assertsexti32 node:$src)]>; +def sexti32 : ComplexPattern<i64, 1, "selectSExti32">; def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{ return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32); }]>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td @@ -356,7 +356,7 @@ (FCVT_WU_D $rs1, 0b001)>; // [u]int32->fp -def : Pat<(sint_to_fp (i64 (sexti32 GPR:$rs1))), (FCVT_D_W $rs1)>; +def : Pat<(sint_to_fp (i64 (sexti32 i64:$rs1))), (FCVT_D_W $rs1)>; def : Pat<(uint_to_fp (i64 (zexti32 GPR:$rs1))), (FCVT_D_WU $rs1)>; def : Pat<(i64 (fp_to_sint FPR64:$rs1)), (FCVT_L_D FPR64:$rs1, 0b001)>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td @@ -400,7 +400,7 @@ def : Pat<(i64 (fp_to_uint FPR32:$rs1)), (FCVT_LU_S $rs1, 0b001)>; // [u]int->fp. Match GCC and default to using dynamic rounding mode. 
-def : Pat<(sint_to_fp (i64 (sexti32 GPR:$rs1))), (FCVT_S_W $rs1, 0b111)>; +def : Pat<(sint_to_fp (i64 (sexti32 i64:$rs1))), (FCVT_S_W $rs1, 0b111)>; def : Pat<(uint_to_fp (i64 (zexti32 GPR:$rs1))), (FCVT_S_WU $rs1, 0b111)>; def : Pat<(sint_to_fp (i64 GPR:$rs1)), (FCVT_S_L $rs1, 0b111)>; def : Pat<(uint_to_fp (i64 GPR:$rs1)), (FCVT_S_LU $rs1, 0b111)>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td @@ -91,9 +91,9 @@ // Although the sexti32 operands may not have originated from an i32 srem, // this pattern is safe as it is impossible for two sign extended inputs to // produce a result where res[63:32]=0 and res[31]=1. -def : Pat<(srem (sexti32 GPR:$rs1), (sexti32 GPR:$rs2)), +def : Pat<(srem (sexti32 i64:$rs1), (sexti32 i64:$rs2)), (REMW GPR:$rs1, GPR:$rs2)>; -def : Pat<(sext_inreg (srem (sexti32 GPR:$rs1), - (sexti32 GPR:$rs2)), i32), +def : Pat<(sext_inreg (srem (sexti32 i64:$rs1), + (sexti32 i64:$rs2)), i32), (REMW GPR:$rs1, GPR:$rs2)>; } // Predicates = [HasStdExtM, IsRV64] diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td @@ -357,7 +357,7 @@ def : Pat<(i64 (fp_to_uint FPR16:$rs1)), (FCVT_LU_H $rs1, 0b001)>; // [u]int->fp. Match GCC and default to using dynamic rounding mode. 
-def : Pat<(sint_to_fp (i64 (sexti32 GPR:$rs1))), (FCVT_H_W $rs1, 0b111)>; +def : Pat<(sint_to_fp (i64 (sexti32 i64:$rs1))), (FCVT_H_W $rs1, 0b111)>; def : Pat<(uint_to_fp (i64 (zexti32 GPR:$rs1))), (FCVT_H_WU $rs1, 0b111)>; def : Pat<(sint_to_fp (i64 GPR:$rs1)), (FCVT_H_L $rs1, 0b111)>; def : Pat<(uint_to_fp (i64 GPR:$rs1)), (FCVT_H_LU $rs1, 0b111)>; diff --git a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll --- a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll +++ b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll @@ -1113,7 +1113,7 @@ define signext i32 @sext_i32_remw_zext_sext_i16(i16 zeroext %0, i16 signext %1) nounwind { ; RV64IM-LABEL: sext_i32_remw_zext_sext_i16: ; RV64IM: # %bb.0: -; RV64IM-NEXT: rem a0, a0, a1 +; RV64IM-NEXT: remw a0, a0, a1 ; RV64IM-NEXT: slli a0, a0, 32 ; RV64IM-NEXT: srli a0, a0, 32 ; RV64IM-NEXT: ret @@ -1126,8 +1126,7 @@ define signext i32 @sext_i32_remw_sext_zext_i16(i16 signext %a, i16 zeroext %b) nounwind { ; RV64IM-LABEL: sext_i32_remw_sext_zext_i16: ; RV64IM: # %bb.0: -; RV64IM-NEXT: rem a0, a0, a1 -; RV64IM-NEXT: sext.w a0, a0 +; RV64IM-NEXT: remw a0, a0, a1 ; RV64IM-NEXT: ret %1 = sext i16 %a to i32 %2 = zext i16 %b to i32