diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11647,9 +11647,18 @@
   return true;
 }
 
-static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
-  return DAG.getDataLayout().getPrefTypeAlign(
+// Used to determine alignment for stack temporary in LowerCall.
+static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {
+  Align PrefAlign = DAG.getDataLayout().getPrefTypeAlign(
       VT.getTypeForEVT(*DAG.getContext()));
+
+  // Limit scalable vector alignment to the stack alignment.
+  if (VT.isScalableVector()) {
+    const RISCVFrameLowering *TFI = Subtarget.getFrameLowering();
+    PrefAlign = std::min(PrefAlign, TFI->getStackAlign());
+  }
+
+  return PrefAlign;
 }
 
 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
@@ -11767,8 +11776,8 @@
     if (VA.getLocInfo() == CCValAssign::Indirect) {
       // Store the argument in a stack slot and pass its address.
       Align StackAlign =
-          std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
-                   getPrefTypeAlign(ArgValue.getValueType(), DAG));
+          std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG, Subtarget),
+                   getPrefTypeAlign(ArgValue.getValueType(), DAG, Subtarget));
       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
       // If the original argument was split (e.g. i128), we need
       // to store the required parts of it here (and pass just one address).
@@ -11790,7 +11799,7 @@
         if (PartVT.isScalableVector())
          Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
         StoredSize += PartVT.getStoreSize();
-        StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
+        StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG, Subtarget));
         Parts.push_back(std::make_pair(PartValue, Offset));
         ++i;
       }
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -286,64 +286,62 @@
 define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vscale x 32 x i32> %x, <vscale x 32 x i32> %y, i32 %w) {
 ; RV32-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -144
-; RV32-NEXT:    .cfi_def_cfa_offset 144
-; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi s0, sp, 144
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 4
 ; RV32-NEXT:    sub sp, sp, a1
-; RV32-NEXT:    andi sp, sp, -128
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3
 ; RV32-NEXT:    add a3, a0, a1
 ; RV32-NEXT:    vl8re32.v v24, (a3)
 ; RV32-NEXT:    vl8re32.v v0, (a0)
-; RV32-NEXT:    addi a0, sp, 128
+; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    vs8r.v v8, (a0)
 ; RV32-NEXT:    add a1, a0, a1
-; RV32-NEXT:    addi a0, sp, 128
+; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    li a3, 2
 ; RV32-NEXT:    vs8r.v v16, (a1)
 ; RV32-NEXT:    vmv8r.v v8, v0
 ; RV32-NEXT:    vmv8r.v v16, v24
 ; RV32-NEXT:    call ext2@plt
-; RV32-NEXT:    addi sp, s0, -144
-; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -144
-; RV64-NEXT:    .cfi_def_cfa_offset 144
-; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    .cfi_def_cfa_offset 32
+; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi s0, sp, 144
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    sub sp, sp, a1
-; RV64-NEXT:    andi sp, sp, -128
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 3
 ; RV64-NEXT:    add a3, a0, a1
 ; RV64-NEXT:    vl8re32.v v24, (a3)
 ; RV64-NEXT:    vl8re32.v v0, (a0)
-; RV64-NEXT:    addi a0, sp, 128
+; RV64-NEXT:    addi a0, sp, 16
 ; RV64-NEXT:    vs8r.v v8, (a0)
 ; RV64-NEXT:    add a1, a0, a1
-; RV64-NEXT:    addi a0, sp, 128
+; RV64-NEXT:    addi a0, sp, 16
 ; RV64-NEXT:    li a3, 2
 ; RV64-NEXT:    vs8r.v v16, (a1)
 ; RV64-NEXT:    vmv8r.v v8, v0
 ; RV64-NEXT:    vmv8r.v v16, v24
 ; RV64-NEXT:    call ext2@plt
-; RV64-NEXT:    addi sp, s0, -144
-; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 144
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
   %t = call fastcc <vscale x 32 x i32> @ext2(<vscale x 32 x i32> %y, <vscale x 32 x i32> %x, i32 %w, i32 2)
   ret <vscale x 32 x i32> %t
@@ -352,17 +350,14 @@
 define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_i32(<vscale x 32 x i32> %x, <vscale x 32 x i32> %y, <vscale x 32 x i32> %z, i32 %w) {
 ; RV32-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -144
-; RV32-NEXT:    .cfi_def_cfa_offset 144
-; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi s0, sp, 144
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    li a3, 48
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    sub sp, sp, a1
-; RV32-NEXT:    andi sp, sp, -128
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3
 ; RV32-NEXT:    add a3, a2, a1
@@ -370,23 +365,23 @@
 ; RV32-NEXT:    csrr a3, vlenb
 ; RV32-NEXT:    slli a3, a3, 3
 ; RV32-NEXT:    add a3, sp, a3
-; RV32-NEXT:    addi a3, a3, 128
+; RV32-NEXT:    addi a3, a3, 16
 ; RV32-NEXT:    vs8r.v v24, (a3) # Unknown-size Folded Spill
 ; RV32-NEXT:    add a3, a0, a1
 ; RV32-NEXT:    vl8re32.v v24, (a3)
-; RV32-NEXT:    addi a3, sp, 128
+; RV32-NEXT:    addi a3, sp, 16
 ; RV32-NEXT:    vs8r.v v24, (a3) # Unknown-size Folded Spill
 ; RV32-NEXT:    vl8re32.v v0, (a2)
 ; RV32-NEXT:    vl8re32.v v24, (a0)
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 128
+; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    vs8r.v v8, (a0)
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    slli a2, a2, 5
 ; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    addi a2, a2, 128
+; RV32-NEXT:    addi a2, a2, 16
 ; RV32-NEXT:    vs8r.v v24, (a2)
 ; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    vs8r.v v16, (a0)
@@ -394,40 +389,40 @@
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 128
+; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    slli a2, a2, 4
 ; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    addi a2, a2, 128
+; RV32-NEXT:    addi a2, a2, 16
 ; RV32-NEXT:    li a5, 42
-; RV32-NEXT:    addi a3, sp, 128
+; RV32-NEXT:    addi a3, sp, 16
 ; RV32-NEXT:    vl8re8.v v8, (a3) # Unknown-size Folded Reload
 ; RV32-NEXT:    vs8r.v v8, (a1)
 ; RV32-NEXT:    vmv8r.v v8, v0
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3
 ; RV32-NEXT:    add a1, sp, a1
-; RV32-NEXT:    addi a1, a1, 128
+; RV32-NEXT:    addi a1, a1, 16
 ; RV32-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    call ext3@plt
-; RV32-NEXT:    addi sp, s0, -144
-; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 48
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -144
-; RV64-NEXT:    .cfi_def_cfa_offset 144
-; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi sp, sp, -48
+; RV64-NEXT:    .cfi_def_cfa_offset 48
+; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi s0, sp, 144
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    li a3, 48
 ; RV64-NEXT:    mul a1, a1, a3
 ; RV64-NEXT:    sub sp, sp, a1
-; RV64-NEXT:    andi sp, sp, -128
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 3
 ; RV64-NEXT:    add a3, a2, a1
@@ -435,23 +430,23 @@
 ; RV64-NEXT:    csrr a3, vlenb
 ; RV64-NEXT:    slli a3, a3, 3
 ; RV64-NEXT:    add a3, sp, a3
-; RV64-NEXT:    addi a3, a3, 128
+; RV64-NEXT:    addi a3, a3, 32
 ; RV64-NEXT:    vs8r.v v24, (a3) # Unknown-size Folded Spill
 ; RV64-NEXT:    add a3, a0, a1
 ; RV64-NEXT:    vl8re32.v v24, (a3)
-; RV64-NEXT:    addi a3, sp, 128
+; RV64-NEXT:    addi a3, sp, 32
 ; RV64-NEXT:    vs8r.v v24, (a3) # Unknown-size Folded Spill
 ; RV64-NEXT:    vl8re32.v v0, (a2)
 ; RV64-NEXT:    vl8re32.v v24, (a0)
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    addi a0, a0, 32
 ; RV64-NEXT:    vs8r.v v8, (a0)
 ; RV64-NEXT:    csrr a2, vlenb
 ; RV64-NEXT:    slli a2, a2, 5
 ; RV64-NEXT:    add a2, sp, a2
-; RV64-NEXT:    addi a2, a2, 128
+; RV64-NEXT:    addi a2, a2, 32
 ; RV64-NEXT:    vs8r.v v24, (a2)
 ; RV64-NEXT:    add a0, a0, a1
 ; RV64-NEXT:    vs8r.v v16, (a0)
@@ -459,25 +454,28 @@
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 5
 ; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    addi a0, a0, 32
 ; RV64-NEXT:    csrr a2, vlenb
 ; RV64-NEXT:    slli a2, a2, 4
 ; RV64-NEXT:    add a2, sp, a2
-; RV64-NEXT:    addi a2, a2, 128
+; RV64-NEXT:    addi a2, a2, 32
 ; RV64-NEXT:    li a5, 42
-; RV64-NEXT:    addi a3, sp, 128
+; RV64-NEXT:    addi a3, sp, 32
 ; RV64-NEXT:    vl8re8.v v8, (a3) # Unknown-size Folded Reload
 ; RV64-NEXT:    vs8r.v v8, (a1)
 ; RV64-NEXT:    vmv8r.v v8, v0
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 3
 ; RV64-NEXT:    add a1, sp, a1
-; RV64-NEXT:    addi a1, a1, 128
+; RV64-NEXT:    addi a1, a1, 32
 ; RV64-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    call ext3@plt
-; RV64-NEXT:    addi sp, s0, -144
-; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 144
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    li a1, 48
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 48
 ; RV64-NEXT:    ret
   %t = call fastcc <vscale x 32 x i32> @ext3(<vscale x 32 x i32> %z, <vscale x 32 x i32> %y, <vscale x 32 x i32> %x, i32 %w, i32 42)
   ret <vscale x 32 x i32> %t
@@ -506,26 +504,23 @@
 define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x i32> %x, <vscale x 32 x i32> %y, <vscale x 32 x i32> %z) {
 ; RV32-LABEL: pass_vector_arg_indirect_stack:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -144
-; RV32-NEXT:    .cfi_def_cfa_offset 144
-; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi s0, sp, 144
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    andi sp, sp, -128
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; RV32-NEXT:    vmv.v.i v8, 0
-; RV32-NEXT:    addi a1, sp, 128
+; RV32-NEXT:    addi a1, sp, 16
 ; RV32-NEXT:    vs8r.v v8, (a1)
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    slli a2, a2, 4
 ; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    addi a2, a2, 128
+; RV32-NEXT:    addi a2, a2, 16
 ; RV32-NEXT:    vs8r.v v8, (a2)
 ; RV32-NEXT:    add a1, a1, a0
 ; RV32-NEXT:    vs8r.v v8, (a1)
@@ -540,40 +535,39 @@
 ; RV32-NEXT:    csrr t2, vlenb
 ; RV32-NEXT:    slli t2, t2, 4
 ; RV32-NEXT:    add t2, sp, t2
-; RV32-NEXT:    addi t2, t2, 128
-; RV32-NEXT:    addi t4, sp, 128
+; RV32-NEXT:    addi t2, t2, 16
+; RV32-NEXT:    addi t4, sp, 16
 ; RV32-NEXT:    li t6, 8
 ; RV32-NEXT:    vs8r.v v8, (a0)
 ; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    vmv.v.i v16, 0
 ; RV32-NEXT:    call vector_arg_indirect_stack@plt
-; RV32-NEXT:    addi sp, s0, -144
-; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: pass_vector_arg_indirect_stack:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -144
-; RV64-NEXT:    .cfi_def_cfa_offset 144
-; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    .cfi_def_cfa_offset 32
+; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi s0, sp, 144
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 5
 ; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    andi sp, sp, -128
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; RV64-NEXT:    vmv.v.i v8, 0
-; RV64-NEXT:    addi a1, sp, 128
+; RV64-NEXT:    addi a1, sp, 16
 ; RV64-NEXT:    vs8r.v v8, (a1)
 ; RV64-NEXT:    csrr a2, vlenb
 ; RV64-NEXT:    slli a2, a2, 4
 ; RV64-NEXT:    add a2, sp, a2
-; RV64-NEXT:    addi a2, a2, 128
+; RV64-NEXT:    addi a2, a2, 16
 ; RV64-NEXT:    vs8r.v v8, (a2)
 ; RV64-NEXT:    add a1, a1, a0
 ; RV64-NEXT:    vs8r.v v8, (a1)
@@ -588,16 +582,18 @@
 ; RV64-NEXT:    csrr t2, vlenb
 ; RV64-NEXT:    slli t2, t2, 4
 ; RV64-NEXT:    add t2, sp, t2
-; RV64-NEXT:    addi t2, t2, 128
-; RV64-NEXT:    addi t4, sp, 128
+; RV64-NEXT:    addi t2, t2, 16
+; RV64-NEXT:    addi t4, sp, 16
 ; RV64-NEXT:    li t6, 8
 ; RV64-NEXT:    vs8r.v v8, (a0)
 ; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    vmv.v.i v16, 0
 ; RV64-NEXT:    call vector_arg_indirect_stack@plt
-; RV64-NEXT:    addi sp, s0, -144
-; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 144
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 5
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 32
 ; RV64-NEXT:    ret
   %s = call fastcc <vscale x 32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> zeroinitializer, i32 8)
   ret <vscale x 32 x i32> %s
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -23,17 +23,14 @@
 define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x i32> %x) {
 ; RV32-LABEL: caller_scalable_vector_split_indirect:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -144
-; RV32-NEXT:    .cfi_def_cfa_offset 144
-; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi sp, sp, -48
+; RV32-NEXT:    .cfi_def_cfa_offset 48
+; RV32-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    addi s0, sp, 144
-; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    andi sp, sp, -128
-; RV32-NEXT:    addi a0, sp, 128
+; RV32-NEXT:    addi a0, sp, 32
 ; RV32-NEXT:    vs8r.v v8, (a0)
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 3
@@ -41,27 +38,26 @@
 ; RV32-NEXT:    vs8r.v v16, (a0)
 ; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; RV32-NEXT:    vmv.v.i v8, 0
-; RV32-NEXT:    addi a0, sp, 128
+; RV32-NEXT:    addi a0, sp, 32
 ; RV32-NEXT:    vmv.v.i v16, 0
 ; RV32-NEXT:    call callee_scalable_vector_split_indirect@plt
-; RV32-NEXT:    addi sp, s0, -144
-; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 48
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: caller_scalable_vector_split_indirect:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -144
-; RV64-NEXT:    .cfi_def_cfa_offset 144
-; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi sp, sp, -48
+; RV64-NEXT:    .cfi_def_cfa_offset 48
+; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
-; RV64-NEXT:    addi s0, sp, 144
-; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    andi sp, sp, -128
-; RV64-NEXT:    addi a0, sp, 128
+; RV64-NEXT:    addi a0, sp, 32
 ; RV64-NEXT:    vs8r.v v8, (a0)
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 3
@@ -69,12 +65,14 @@
 ; RV64-NEXT:    vs8r.v v16, (a0)
 ; RV64-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; RV64-NEXT:    vmv.v.i v8, 0
-; RV64-NEXT:    addi a0, sp, 128
+; RV64-NEXT:    addi a0, sp, 32
 ; RV64-NEXT:    vmv.v.i v16, 0
 ; RV64-NEXT:    call callee_scalable_vector_split_indirect@plt
-; RV64-NEXT:    addi sp, s0, -144
-; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 144
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 48
 ; RV64-NEXT:    ret
   %c = alloca i64
   %a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -22,31 +22,29 @@
 define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %x) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
-; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    addi sp, sp, -48
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_offset ra, -8
-; CHECK-NEXT:    addi s0, sp, 80
-; CHECK-NEXT:    .cfi_def_cfa s0, 0
 ; CHECK-NEXT:    csrr t0, vlenb
 ; CHECK-NEXT:    slli t0, t0, 4
 ; CHECK-NEXT:    sub sp, sp, t0
-; CHECK-NEXT:    andi sp, sp, -64
-; CHECK-NEXT:    mv s1, sp
-; CHECK-NEXT:    addi t0, s1, 64
+; CHECK-NEXT:    addi t0, sp, 32
 ; CHECK-NEXT:    vs8r.v v8, (t0)
 ; CHECK-NEXT:    csrr t1, vlenb
 ; CHECK-NEXT:    slli t1, t1, 3
-; CHECK-NEXT:    add t1, s1, t1
-; CHECK-NEXT:    addi t1, t1, 64
+; CHECK-NEXT:    add t1, sp, t1
+; CHECK-NEXT:    addi t1, t1, 32
 ; CHECK-NEXT:    vs8r.v v8, (t1)
 ; CHECK-NEXT:    sd t0, 8(sp)
 ; CHECK-NEXT:    sd t1, 0(sp)
 ; CHECK-NEXT:    vmv8r.v v16, v8
 ; CHECK-NEXT:    call bar@plt
-; CHECK-NEXT:    addi sp, s0, -80
-; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 48
 ; CHECK-NEXT:    ret
   %ret = call <vscale x 16 x i32> @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x)
   ret <vscale x 16 x i32> %ret