diff --git a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallLowering.cpp
@@ -57,6 +57,46 @@
   }
 };
 
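+// Handler for incoming formal arguments: copies each argument from the
+// physical register assigned by the calling convention into the desired
+// virtual register. Arguments passed on the stack are not handled yet.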
+struct IncomingValueHandler : public CallLowering::ValueHandler {
+  IncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                       CCAssignFn *AssignFn)
+      : ValueHandler(B, MRI, AssignFn) {}
+
+  bool isIncomingArgumentHandler() const override { return true; }
+
+  Register getStackAddress(uint64_t Size, int64_t Offset,
+                           MachinePointerInfo &MPO) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
+                            MachinePointerInfo &MPO, CCValAssign &VA) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToReg(Register ValVReg, Register PhysReg,
+                        CCValAssign &VA) override {
+    // Copy argument received in physical register to desired VReg.
+    MIRBuilder.getMBB().addLiveIn(PhysReg);
+    MIRBuilder.buildCopy(ValVReg, PhysReg);
+  }
+
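+  // Follows the CCAssignFn convention: returning true signals that the
+  // argument could not be assigned. A null AssignFn means the locations have
+  // already been computed, so success is reported without further work.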
+  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
+                 CCValAssign::LocInfo LocInfo,
+                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+                 CCState &State) override {
+    if (AssignFn)
+      return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+    return false;
+  }
+};
+
 } // namespace
 
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
@@ -122,10 +156,75 @@
     MachineIRBuilder &MIRBuilder, const Function &F,
     ArrayRef<ArrayRef<Register>> VRegs) const {
 
+  // Early exit if there are no arguments.
   if (F.arg_empty())
     return true;
 
-  return false;
+  // TODO: Support vararg functions.
+  if (F.isVarArg())
+    return false;
+
+  // TODO: Support all argument types.
+  for (auto &Arg : F.args()) {
+    if (Arg.getType()->isIntegerTy())
+      continue;
+    if (Arg.getType()->isPointerTy())
+      continue;
+    if (Arg.getType()->isFloatingPointTy())
+      continue;
+    return false;
+  }
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  const DataLayout &DL = MF.getDataLayout();
+  const RISCVTargetLowering &TLI = *getTLI<RISCVTargetLowering>();
+
+  SmallVector<ArgInfo, 32> SplitArgInfos;
+  SmallVector<ISD::InputArg, 8> Ins;
+  unsigned Index = 0;
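+  // For each argument, compute the value types it splits into, populate Ins
+  // for the calling-convention analysis, and collect per-split ArgInfos.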
+  for (auto &Arg : F.args()) {
+    // Construct an ArgInfo from the destination registers and argument type.
+    ArgInfo AInfo(VRegs[Index], Arg.getType());
+    setArgFlags(AInfo, Index + AttributeList::FirstArgIndex, DL, F);
+
+    SmallVector<EVT, 4> SplitEVTs;
+    ComputeValueVTs(TLI, DL, Arg.getType(), SplitEVTs);
+    assert(VRegs[Index].size() == SplitEVTs.size() &&
+           "For each split Type there should be exactly one VReg.");
+
+    setISDArgsForCallingConv(F, AInfo, SplitEVTs, Ins, /*isRet=*/false);
+
+    // Handle any required merging of the split value types - as indicated by
+    // SplitEVTs - from physical registers into the desired VRegs. ArgInfo
+    // objects are constructed accordingly and appended to SplitArgInfos.
+    splitToValueTypes(AInfo, SplitArgInfos, SplitEVTs, MF,
+                      [&](ArrayRef<Register> Regs, int SplitIdx) {
+                        auto MIB =
+                            MIRBuilder.buildMerge(VRegs[Index][SplitIdx], Regs);
+                        MIRBuilder.setInstr(*MIB.getInstr());
+                      });
+
+    ++Index;
+  }
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
+
+  // TODO: Use CC_RISCV_FastCC for CallingConv::Fast. Preferably,
+  // TLI.CCAssignFnForCall will be implemented so that the handling of both
+  // calling conventions can be unified.
+  if (F.getCallingConv() == CallingConv::Fast)
+    return false;
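+  // Assign a location (register or stack slot) to each split argument.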
+  TLI.analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
+
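+  // Locations were computed above, so the handler needs no CCAssignFn.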
+  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo(), nullptr);
+
+  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, SplitArgInfos,
+                           Handler);
 }
 
 bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator-calllowering.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator-calllowering.ll
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator-calllowering.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator-calllowering.ll
@@ -141,3 +141,160 @@
 entry:
   ret [2 x i32] [i32 1, i32 2]
 }
+
+define void @test_args_i8(i8 %a) {
+  ; RV32I-LABEL: name: test_args_i8
+  ; RV32I:        [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(s8) = G_TRUNC [[VREG1]](s32)
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s8) = G_ADD [[VREG]], [[CST]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_i8
+  ; RV64I:        [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(s8) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s8) = G_ADD [[VREG]], [[CST]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i8 %a, 1
+  ret void
+}
+
+define void @test_args_i16(i16 %a) {
+  ; RV32I-LABEL: name: test_args_i16
+  ; RV32I:        [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(s16) = G_TRUNC [[VREG1]](s32)
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s16) = G_ADD [[VREG]], [[CST]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_i16
+  ; RV64I:        [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(s16) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s16) = G_ADD [[VREG]], [[CST]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i16 %a, 1
+  ret void
+}
+
+define void @test_args_i32(i32 %a) {
+  ; RV32I-LABEL: name: test_args_i32
+  ; RV32I:        [[VREG:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s32) = G_ADD [[VREG]], [[CST]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_i32
+  ; RV64I:        [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[VREG:%[0-9]+]]:_(s32) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s32) = G_ADD [[VREG]], [[CST]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i32 %a, 1
+  ret void
+}
+
+define void @test_args_i64(i64 %a) {
+  ; RV32I-LABEL: name: test_args_i64
+  ; RV32I:        [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV32I-NEXT:   [[VREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREG1]](s32), [[VREG2]](s32)
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s64) = G_ADD [[VREG]], [[CST]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_i64
+  ; RV64I:        [[VREG:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s64) = G_ADD [[VREG]], [[CST]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i64 %a, 1
+  ret void
+}
+
+define void @test_args_2xi8(i8 %a, i8 %b) {
+  ; RV32I-LABEL: name: test_args_2xi8
+  ; RV32I:        [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[TVREG1:%[0-9]+]]:_(s8) = G_TRUNC [[VREG1]](s32)
+  ; RV32I-NEXT:   [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[TVREG2:%[0-9]+]]:_(s8) = G_TRUNC [[VREG2]](s32)
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s8) = G_ADD [[TVREG1]], [[TVREG2]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_2xi8
+  ; RV64I:        [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[TVREG1:%[0-9]+]]:_(s8) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[TVREG2:%[0-9]+]]:_(s8) = G_TRUNC [[VREG2]](s64)
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s8) = G_ADD [[TVREG1]], [[TVREG2]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i8 %a, %b
+  ret void
+}
+
+define void @test_args_2xi16(i16 %a, i16 %b) {
+  ; RV32I-LABEL: name: test_args_2xi16
+  ; RV32I:        [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[TVREG1:%[0-9]+]]:_(s16) = G_TRUNC [[VREG1]](s32)
+  ; RV32I-NEXT:   [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[TVREG2:%[0-9]+]]:_(s16) = G_TRUNC [[VREG2]](s32)
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s16) = G_ADD [[TVREG1]], [[TVREG2]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_2xi16
+  ; RV64I:        [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[TVREG1:%[0-9]+]]:_(s16) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[TVREG2:%[0-9]+]]:_(s16) = G_TRUNC [[VREG2]](s64)
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s16) = G_ADD [[TVREG1]], [[TVREG2]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i16 %a, %b
+  ret void
+}
+
+define void @test_args_2xi32(i32 %a, i32 %b) {
+  ; RV32I-LABEL: name: test_args_2xi32
+  ; RV32I:        [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s32) = G_ADD [[VREG1]], [[VREG2]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_2xi32
+  ; RV64I:        [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[TVREG1:%[0-9]+]]:_(s32) = G_TRUNC [[VREG1]](s64)
+  ; RV64I-NEXT:   [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[TVREG2:%[0-9]+]]:_(s32) = G_TRUNC [[VREG2]](s64)
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s32) = G_ADD [[TVREG1]], [[TVREG2]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i32 %a, %b
+  ret void
+}
+
+define void @test_args_2xi64(i64 %a, i64 %b) {
+  ; RV32I-LABEL: name: test_args_2xi64
+  ; RV32I:        [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[VREG3:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32I-NEXT:   [[VREG4:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32I-NEXT:   [[EVREG2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREG3]](s32), [[VREG4]](s32)
+  ; RV32I-NEXT:   [[EVREG1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[VREG1]](s32), [[VREG2]](s32)
+  ; RV32I-NEXT:   {{%[0-9]+}}:_(s64) = G_ADD [[EVREG1]], [[EVREG2]]
+  ; RV32I-NEXT:   PseudoRET
+
+  ; RV64I-LABEL: name: test_args_2xi64
+  ; RV64I:        [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   {{%[0-9]+}}:_(s64) = G_ADD [[VREG1]], [[VREG2]]
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  %0 = add i64 %a, %b
+  ret void
+}