diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -970,7 +970,7 @@ setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND, ISD::OR, ISD::XOR, ISD::SETCC}); if (Subtarget.is64Bit()) - setTargetDAGCombine(ISD::SRA); + setTargetDAGCombine({ISD::SRA, ISD::SIGN_EXTEND_INREG}); if (Subtarget.hasStdExtF()) setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM}); @@ -8195,6 +8195,13 @@ const RISCVSubtarget &Subtarget) { SDValue Src = N->getOperand(0); EVT VT = N->getValueType(0); + // Fold (sign_extend_inreg(CopyFromReg(callseq_end X), X10), i32) -> + // (CopyFromReg(callseq_end X), X10) + if (Subtarget.is64Bit() && Src.getOpcode() == ISD::CopyFromReg && + cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 && + Src.getOperand(0).getOpcode() == ISD::CALLSEQ_END) { + return Src; + } // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X) if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH && diff --git a/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp b/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp --- a/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp +++ b/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp @@ -328,8 +328,6 @@ return RVFI->isSExt32Register(VReg); } - // TODO: Handle returns from calls? - Register SrcReg = MI->getOperand(1).getReg(); // If this is a copy from another register, check its source instruction. 
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -106,7 +106,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __atomic_load_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -2252,7 +2251,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_exchange_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -2287,7 +2285,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_add_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -2323,7 +2320,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_sub_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -2359,7 +2355,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_and_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -2401,7 +2396,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_nand_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -2443,7 +2437,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_or_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; 
RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -2478,7 +2471,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: call __atomic_fetch_xor_4@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll @@ -177,7 +177,6 @@ ; RV64I-FPELIM-NEXT: addi sp, sp, -16 ; RV64I-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-FPELIM-NEXT: call callee_tiny_scalar_ret@plt -; RV64I-FPELIM-NEXT: sext.w a0, a0 ; RV64I-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-FPELIM-NEXT: addi sp, sp, 16 ; RV64I-FPELIM-NEXT: ret @@ -189,7 +188,6 @@ ; RV64I-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; RV64I-WITHFP-NEXT: addi s0, sp, 16 ; RV64I-WITHFP-NEXT: call callee_tiny_scalar_ret@plt -; RV64I-WITHFP-NEXT: sext.w a0, a0 ; RV64I-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload ; RV64I-WITHFP-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll --- a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll +++ b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll @@ -86,7 +86,6 @@ ; RV64-ALL-NEXT: addi sp, sp, -16 ; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64-ALL-NEXT: call __muldi3@plt -; RV64-ALL-NEXT: sext.w a0, a0 ; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-ALL-NEXT: addi sp, sp, 16 ; RV64-ALL-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll --- a/llvm/test/CodeGen/RISCV/mul.ll +++ b/llvm/test/CodeGen/RISCV/mul.ll @@ -30,7 +30,6 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv a1, a0 ; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: 
ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret @@ -63,7 +62,6 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: call __muldi3@plt -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret