diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7190,8 +7190,10 @@
     while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
       CCValAssign &PartVA = ArgLocs[i + 1];
       unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
-      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
-                                    DAG.getIntPtrConstant(PartOffset, DL));
+      SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
+      if (PartVA.getValVT().isScalableVector())
+        Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
+      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
       InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                    MachinePointerInfo()));
       ++i;
@@ -7473,14 +7475,17 @@
       // Calculate the total size to store. We don't have access to what we're
       // actually storing other than performing the loop and collecting the
       // info.
-      SmallVector<std::pair<SDValue, unsigned>> Parts;
+      SmallVector<std::pair<SDValue, SDValue>> Parts;
       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
         SDValue PartValue = OutVals[i + 1];
         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
+        SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
         EVT PartVT = PartValue.getValueType();
+        if (PartVT.isScalableVector())
+          Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
         StoredSize += PartVT.getStoreSize();
         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
-        Parts.push_back(std::make_pair(PartValue, PartOffset));
+        Parts.push_back(std::make_pair(PartValue, Offset));
         ++i;
       }
       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
@@ -7490,9 +7495,9 @@
                        MachinePointerInfo::getFixedStack(MF, FI)));
       for (const auto &Part : Parts) {
         SDValue PartValue = Part.first;
-        unsigned PartOffset = Part.second;
-        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
-                                      DAG.getIntPtrConstant(PartOffset, DL));
+        SDValue PartOffset = Part.second;
+        SDValue Address =
+            DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
         MemOpChains.push_back(
             DAG.getStore(Chain, DL, PartValue, Address,
                          MachinePointerInfo::getFixedStack(MF, FI)));
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -8,7 +8,9 @@
 define <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> %x, <vscale x 32 x i32> %y) {
 ; RV32-LABEL: callee_scalable_vector_split_indirect:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, a0, 64
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, a0, a1
 ; RV32-NEXT:    vl8re32.v v24, (a0)
 ; RV32-NEXT:    vl8re32.v v0, (a1)
 ; RV32-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
@@ -18,7 +20,9 @@
 ;
 ; RV64-LABEL: callee_scalable_vector_split_indirect:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, a0, 64
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, a0, a1
 ; RV64-NEXT:    vl8re32.v v24, (a0)
 ; RV64-NEXT:    vl8re32.v v0, (a1)
 ; RV64-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
@@ -41,7 +45,10 @@
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    addi a0, sp, 96
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    addi a1, sp, 32
+; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    vs8r.v v16, (a0)
 ; RV32-NEXT:    addi a0, sp, 32
 ; RV32-NEXT:    vs8r.v v8, (a0)
@@ -66,7 +73,10 @@
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    addi a0, sp, 88
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    addi a1, sp, 24
+; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    vs8r.v v16, (a0)
 ; RV64-NEXT:    addi a0, sp, 24
 ; RV64-NEXT:    vs8r.v v8, (a0)
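
Note on the offset arithmetic behind the test changes: when a <vscale x 32 x i32>
argument is split for indirect passing, each half is a <vscale x 16 x i32> whose
store size is vscale * 64 bytes, so the second half lives at byte offset
vscale * 64, not at the fixed offset 64 that the old code emitted. The updated
assembly computes exactly this: csrr reads vlenb = VLEN/8 = vscale * 8 bytes,
and slli by 3 scales that by 8, yielding vscale * 64.

All three RISCVISelLowering.cpp hunks apply the same pattern. Distilled into a
standalone sketch for clarity (the free-function form and helper name are
illustrative only, not part of the patch):

    // Sketch: compute the in-memory address of one part of a split argument.
    // For fixed-length parts the offset is a plain byte constant; for scalable
    // parts the same constant counts bytes per vscale unit, so it must be
    // multiplied by vscale at run time, which an ISD::VSCALE node expresses.
    static SDValue getPartAddress(SelectionDAG &DAG, const SDLoc &DL,
                                  EVT PtrVT, MVT XLenVT, SDValue Base,
                                  EVT PartVT, unsigned PartOffset) {
      SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
      if (PartVT.isScalableVector())
        Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
      return DAG.getNode(ISD::ADD, DL, PtrVT, Base, Offset);
    }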