Index: llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3996,7 +3996,8 @@
   }
 
   if (LC != RTLIB::UNKNOWN_LIBCALL && TLI.getLibcallName(LC)) {
-    SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) };
+    SDValue ShAmt = DAG.getAnyExtOrTrunc(N->getOperand(1), dl, MVT::i32);
+    SDValue Ops[2] = { N->getOperand(0), ShAmt };
     TargetLowering::MakeLibCallOptions CallOptions;
     CallOptions.setSExt(isSigned);
     SplitInteger(TLI.makeLibCall(DAG, LC, VT, Ops, CallOptions, dl).first, Lo, Hi);
Index: llvm/test/CodeGen/RISCV/shifts.ll
===================================================================
--- llvm/test/CodeGen/RISCV/shifts.ll
+++ llvm/test/CodeGen/RISCV/shifts.ll
@@ -7,6 +7,8 @@
 ; Basic shift support is tested as part of ALU.ll. This file ensures that
 ; shifts which may not be supported natively are lowered properly.
 
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+
 define i64 @lshr64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: lshr64:
 ; RV32I:       # %bb.0:
@@ -142,6 +144,50 @@
   ret i64 %1
 }
 
+define i64 @fshr64_minsize(i64 %a, i64 %b) minsize nounwind {
+; RV32I-LABEL: fshr64_minsize:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    mv s0, a2
+; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s1, a0
+; RV32I-NEXT:    andi a2, a2, 63
+; RV32I-NEXT:    call __lshrdi3@plt
+; RV32I-NEXT:    mv s3, a0
+; RV32I-NEXT:    mv s4, a1
+; RV32I-NEXT:    neg a0, s0
+; RV32I-NEXT:    andi a2, a0, 63
+; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a1, s2
+; RV32I-NEXT:    call __ashldi3@plt
+; RV32I-NEXT:    or a0, s3, a0
+; RV32I-NEXT:    or a1, s4, a1
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fshr64_minsize:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srl a2, a0, a1
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    sll a0, a0, a1
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ret
+  %res = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
+  ret i64 %res
+}
+
 define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-LABEL: lshr128:
 ; RV32I:       # %bb.0:
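
Context for the change (not part of the patch): the 64-bit shift helpers in compiler-rt and libgcc take the shift amount as a 32-bit int (e.g. di_int __lshrdi3(di_int a, si_int b)), so the DAG should pass the amount as an i32 even when the node's shift-amount operand is wider, as with the i64 amounts the funnel-shift expansion produces here; getAnyExtOrTrunc is presumably enough because the amount is masked to 0..63 before the call, so the high bits are don't-care. The new fshr64_minsize test pins this down: fshr(a, a, b) on i64 is a rotate right by b mod 64, which the RV32I checks expand into a masked __lshrdi3/__ashldi3 pair. A minimal C++ sketch of that expansion's semantics follows; rotr64 is a hypothetical name for illustration only.

#include <cassert>
#include <cstdint>

// Sketch (not LLVM code): rotate right by b mod 64, computed as
// (a >> (b & 63)) | (a << (-b & 63)), mirroring the
// andi / __lshrdi3 / neg / andi / __ashldi3 / or sequence in the
// RV32I check lines above.
static uint64_t rotr64(uint64_t a, uint64_t b) {
  unsigned lo = static_cast<unsigned>(b) & 63;      // andi a2, a2, 63
  unsigned hi = static_cast<unsigned>(0 - b) & 63;  // neg a0, s0; andi a2, a0, 63
  // When lo == 0, hi is also 0, so both shifts are no-ops and an
  // undefined 64-bit shift count never occurs.
  return (a >> lo) | (a << hi);
}

int main() {
  assert(rotr64(0x123456789abcdef0ULL, 4) == 0x0123456789abcdefULL);
  assert(rotr64(0x123456789abcdef0ULL, 0) == 0x123456789abcdef0ULL);
  return 0;
}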