diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -15,6 +15,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/CodeGen/Register.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
 #include "llvm/Support/Alignment.h"
 #include <cassert>
 #include <vector>
@@ -486,14 +487,21 @@
     return Objects[ObjectIdx + NumFixedObjects].Alignment;
   }
 
+  /// Should this stack ID be considered in MaxAlignment.
+  bool contributesToMaxAlignment(uint8_t StackID) {
+    return StackID == TargetStackID::Default ||
+           StackID == TargetStackID::ScalableVector;
+  }
+
   /// setObjectAlignment - Change the alignment of the specified stack object.
   void setObjectAlignment(int ObjectIdx, Align Alignment) {
     assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
            "Invalid Object Idx!");
     Objects[ObjectIdx + NumFixedObjects].Alignment = Alignment;
 
-    // Only ensure max alignment for the default stack.
-    if (getStackID(ObjectIdx) == 0)
+    // Only ensure max alignment for the default and scalable vector stack.
+    uint8_t StackID = getStackID(ObjectIdx);
+    if (contributesToMaxAlignment(StackID))
       ensureMaxAlignment(Alignment);
   }
 
diff --git a/llvm/lib/CodeGen/MachineFrameInfo.cpp b/llvm/lib/CodeGen/MachineFrameInfo.cpp
--- a/llvm/lib/CodeGen/MachineFrameInfo.cpp
+++ b/llvm/lib/CodeGen/MachineFrameInfo.cpp
@@ -58,7 +58,7 @@
                                 !IsSpillSlot, StackID));
   int Index = (int)Objects.size() - NumFixedObjects - 1;
   assert(Index >= 0 && "Bad frame index!");
-  if (StackID == 0)
+  if (contributesToMaxAlignment(StackID))
     ensureMaxAlignment(Alignment);
   return Index;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -515,6 +515,8 @@
     if (STI.isRegisterReservedByUser(FPReg))
       MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
           MF.getFunction(), "Frame pointer required, but has been reserved."});
+    // The frame pointer does need to be reserved from register allocation.
+    assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");
     adjustReg(MBB, MBBI, DL, FPReg, SPReg,
               RealStackSize - RVFI->getVarArgsSaveSize(),
               MachineInstr::FrameSetup);
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -289,7 +289,9 @@
 ; RV32-NEXT:    addi sp, sp, -144
 ; RV32-NEXT:    .cfi_def_cfa_offset 144
 ; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 144
 ; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a1, vlenb
@@ -312,6 +314,7 @@
 ; RV32-NEXT:    call ext2@plt
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 144
 ; RV32-NEXT:    ret
 ;
@@ -320,7 +323,9 @@
 ; RV64-NEXT:    addi sp, sp, -144
 ; RV64-NEXT:    .cfi_def_cfa_offset 144
 ; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 128(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 144
 ; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a1, vlenb
@@ -343,6 +348,7 @@
 ; RV64-NEXT:    call ext2@plt
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 144
 ; RV64-NEXT:    ret
   %t = call fastcc <vscale x 32 x i32> @ext2(<vscale x 32 x i32> %y, <vscale x 32 x i32> %x, i32 %w, i32 2)
@@ -355,7 +361,9 @@
 ; RV32-NEXT:    addi sp, sp, -144
 ; RV32-NEXT:    .cfi_def_cfa_offset 144
 ; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 144
 ; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a1, vlenb
@@ -412,6 +420,7 @@
 ; RV32-NEXT:    call ext3@plt
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 144
 ; RV32-NEXT:    ret
 ;
@@ -420,7 +429,9 @@
 ; RV64-NEXT:    addi sp, sp, -144
 ; RV64-NEXT:    .cfi_def_cfa_offset 144
 ; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 128(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 144
 ; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a1, vlenb
@@ -477,6 +488,7 @@
 ; RV64-NEXT:    call ext3@plt
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 144
 ; RV64-NEXT:    ret
   %t = call fastcc <vscale x 32 x i32> @ext3(<vscale x 32 x i32> %z, <vscale x 32 x i32> %y, <vscale x 32 x i32> %x, i32 %w, i32 42)
@@ -509,7 +521,9 @@
 ; RV32-NEXT:    addi sp, sp, -144
 ; RV32-NEXT:    .cfi_def_cfa_offset 144
 ; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 144
 ; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
@@ -549,6 +563,7 @@
 ; RV32-NEXT:    call vector_arg_indirect_stack@plt
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 144
 ; RV32-NEXT:    ret
 ;
@@ -557,7 +572,9 @@
 ; RV64-NEXT:    addi sp, sp, -144
 ; RV64-NEXT:    .cfi_def_cfa_offset 144
 ; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 128(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 144
 ; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a0, vlenb
@@ -597,6 +614,7 @@
 ; RV64-NEXT:    call vector_arg_indirect_stack@plt
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 144
 ; RV64-NEXT:    ret
   %s = call fastcc <vscale x 32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> zeroinitializer, i32 8)
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -26,7 +26,9 @@
 ; RV32-NEXT:    addi sp, sp, -144
 ; RV32-NEXT:    .cfi_def_cfa_offset 144
 ; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
 ; RV32-NEXT:    addi s0, sp, 144
 ; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
@@ -46,6 +48,7 @@
 ; RV32-NEXT:    call callee_scalable_vector_split_indirect@plt
 ; RV32-NEXT:    addi sp, s0, -144
 ; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 144
 ; RV32-NEXT:    ret
 ;
@@ -54,7 +57,9 @@
 ; RV64-NEXT:    addi sp, sp, -144
 ; RV64-NEXT:    .cfi_def_cfa_offset 144
 ; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 128(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
 ; RV64-NEXT:    addi s0, sp, 144
 ; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a0, vlenb
@@ -74,6 +79,7 @@
 ; RV64-NEXT:    call callee_scalable_vector_split_indirect@plt
 ; RV64-NEXT:    addi sp, s0, -144
 ; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 144
 ; RV64-NEXT:    ret
   %c = alloca i64
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -593,9 +593,13 @@
 define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
 ; RV32-LABEL: extractelt_nxv16f64_neg1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -64
-; RV32-NEXT:    .cfi_def_cfa_offset 64
-; RV32-NEXT:    addi s0, sp, 64
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    .cfi_def_cfa_offset 80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 80
 ; RV32-NEXT:    .cfi_def_cfa s0, 0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
@@ -610,15 +614,21 @@
 ; RV32-NEXT:    slli a1, a1, 4
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    fld fa0, -8(a0)
-; RV32-NEXT:    addi sp, s0, -64
-; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: extractelt_nxv16f64_neg1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -64
-; RV64-NEXT:    .cfi_def_cfa_offset 64
-; RV64-NEXT:    addi s0, sp, 64
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    .cfi_def_cfa_offset 80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 80
 ; RV64-NEXT:    .cfi_def_cfa s0, 0
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
@@ -641,8 +651,10 @@
 ; RV64-NEXT:    slli a1, a2, 3
 ; RV64-NEXT:    add a0, a0, a1
 ; RV64-NEXT:    fld fa0, 0(a0)
-; RV64-NEXT:    addi sp, s0, -64
-; RV64-NEXT:    addi sp, sp, 64
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
 ; RV64-NEXT:    ret
   %r = extractelement <vscale x 16 x double> %v, i32 -1
   ret double %r
@@ -660,34 +672,75 @@
 }
 
 define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 zeroext %idx) {
-; CHECK-LABEL: extractelt_nxv16f64_idx:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a2, a1, 1
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    bltu a0, a2, .LBB54_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a2
-; CHECK-NEXT:  .LBB54_2:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    addi s0, sp, 64
-; CHECK-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 4
-; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    andi sp, sp, -64
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    addi a2, sp, 64
-; CHECK-NEXT:    add a0, a2, a0
-; CHECK-NEXT:    vs8r.v v8, (a2)
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, a2, a1
-; CHECK-NEXT:    vs8r.v v16, (a1)
-; CHECK-NEXT:    fld fa0, 0(a0)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
-; CHECK-NEXT:    ret
+; RV32-LABEL: extractelt_nxv16f64_idx:
+; RV32:       # %bb.0:
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a2, a1, 1
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    bltu a0, a2, .LBB54_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:  .LBB54_2:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    .cfi_def_cfa_offset 80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 4
+; RV32-NEXT:    sub sp, sp, a2
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    addi a2, sp, 64
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    vs8r.v v8, (a2)
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    add a1, a2, a1
+; RV32-NEXT:    vs8r.v v16, (a1)
+; RV32-NEXT:    fld fa0, 0(a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_nxv16f64_idx:
+; RV64:       # %bb.0:
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a2, a1, 1
+; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    bltu a0, a2, .LBB54_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    mv a0, a2
+; RV64-NEXT:  .LBB54_2:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    .cfi_def_cfa_offset 80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a2, a2, 4
+; RV64-NEXT:    sub sp, sp, a2
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    addi a2, sp, 64
+; RV64-NEXT:    add a0, a2, a0
+; RV64-NEXT:    vs8r.v v8, (a2)
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, a2, a1
+; RV64-NEXT:    vs8r.v v16, (a1)
+; RV64-NEXT:    fld fa0, 0(a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
   %r = extractelement <vscale x 16 x double> %v, i32 %idx
   ret double %r
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define i1 @extractelt_nxv1i1(<vscale x 1 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_nxv1i1:
@@ -129,41 +129,85 @@
 }
 
 define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
-; CHECK-LABEL: extractelt_nxv128i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 4
-; CHECK-NEXT:    addi a3, a3, -1
-; CHECK-NEXT:    bltu a1, a3, .LBB7_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a1, a3
-; CHECK-NEXT:  .LBB7_2:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    addi s0, sp, 64
-; CHECK-NEXT:    csrr a3, vlenb
-; CHECK-NEXT:    slli a3, a3, 4
-; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    andi sp, sp, -64
-; CHECK-NEXT:    addi a3, sp, 64
-; CHECK-NEXT:    slli a2, a2, 3
-; CHECK-NEXT:    add a4, a0, a2
-; CHECK-NEXT:    vl8r.v v16, (a4)
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    add a0, a3, a1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmseq.vi v8, v16, 0
-; CHECK-NEXT:    vmseq.vi v0, v24, 0
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmerge.vim v24, v16, 1, v0
-; CHECK-NEXT:    vs8r.v v24, (a3)
-; CHECK-NEXT:    add a1, a3, a2
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
-; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    lb a0, 0(a0)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
-; CHECK-NEXT:    ret
+; RV32-LABEL: extractelt_nxv128i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a3, a2, 4
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    bltu a1, a3, .LBB7_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    mv a1, a3
+; RV32-NEXT:  .LBB7_2:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    csrr a3, vlenb
+; RV32-NEXT:    slli a3, a3, 4
+; RV32-NEXT:    sub sp, sp, a3
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    addi a3, sp, 64
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a4, a0, a2
+; RV32-NEXT:    vl8r.v v16, (a4)
+; RV32-NEXT:    vl8r.v v24, (a0)
+; RV32-NEXT:    add a0, a3, a1
+; RV32-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; RV32-NEXT:    vmseq.vi v8, v16, 0
+; RV32-NEXT:    vmseq.vi v0, v24, 0
+; RV32-NEXT:    vmv.v.i v16, 0
+; RV32-NEXT:    vmerge.vim v24, v16, 1, v0
+; RV32-NEXT:    vs8r.v v24, (a3)
+; RV32-NEXT:    add a1, a3, a2
+; RV32-NEXT:    vmv1r.v v0, v8
+; RV32-NEXT:    vmerge.vim v8, v16, 1, v0
+; RV32-NEXT:    vs8r.v v8, (a1)
+; RV32-NEXT:    lb a0, 0(a0)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_nxv128i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a3, a2, 4
+; RV64-NEXT:    addi a3, a3, -1
+; RV64-NEXT:    bltu a1, a3, .LBB7_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    mv a1, a3
+; RV64-NEXT:  .LBB7_2:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    csrr a3, vlenb
+; RV64-NEXT:    slli a3, a3, 4
+; RV64-NEXT:    sub sp, sp, a3
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    addi a3, sp, 64
+; RV64-NEXT:    slli a2, a2, 3
+; RV64-NEXT:    add a4, a0, a2
+; RV64-NEXT:    vl8r.v v16, (a4)
+; RV64-NEXT:    vl8r.v v24, (a0)
+; RV64-NEXT:    add a0, a3, a1
+; RV64-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; RV64-NEXT:    vmseq.vi v8, v16, 0
+; RV64-NEXT:    vmseq.vi v0, v24, 0
+; RV64-NEXT:    vmv.v.i v16, 0
+; RV64-NEXT:    vmerge.vim v24, v16, 1, v0
+; RV64-NEXT:    vs8r.v v24, (a3)
+; RV64-NEXT:    add a1, a3, a2
+; RV64-NEXT:    vmv1r.v v0, v8
+; RV64-NEXT:    vmerge.vim v8, v16, 1, v0
+; RV64-NEXT:    vs8r.v v8, (a1)
+; RV64-NEXT:    lb a0, 0(a0)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
   %a = load <vscale x 128 x i8>, <vscale x 128 x i8>* %x
   %b = icmp eq <vscale x 128 x i8> %a, zeroinitializer
   %c = extractelement <vscale x 128 x i1> %b, i64 %idx
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -873,9 +873,13 @@
 define i32 @extractelt_nxv32i32_neg1(<vscale x 32 x i32> %v) {
 ; CHECK-LABEL: extractelt_nxv32i32_neg1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    addi sp, sp, -80
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    addi s0, sp, 80
 ; CHECK-NEXT:    .cfi_def_cfa s0, 0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
@@ -890,8 +894,10 @@
 ; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    lw a0, -4(a0)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 80
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i32> %v, i32 -1
   ret i32 %r
@@ -918,9 +924,13 @@
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a2
 ; CHECK-NEXT:  .LBB74_2:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    addi sp, sp, -80
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -4
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    addi s0, sp, 80
 ; CHECK-NEXT:    .cfi_def_cfa s0, 0
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 4
@@ -934,8 +944,10 @@
 ; CHECK-NEXT:    add a1, a2, a1
 ; CHECK-NEXT:    vs8r.v v16, (a1)
 ; CHECK-NEXT:    lw a0, 0(a0)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 80
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 32 x i32> %v, i32 %idx
   ret i32 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -804,9 +804,13 @@
 define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv16i64_neg1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    addi sp, sp, -80
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    addi s0, sp, 80
 ; CHECK-NEXT:    .cfi_def_cfa s0, 0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
@@ -829,8 +833,10 @@
 ; CHECK-NEXT:    slli a1, a2, 3
 ; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    ld a0, 0(a0)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 80
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i64> %v, i32 -1
   ret i64 %r
@@ -857,9 +863,13 @@
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a2
 ; CHECK-NEXT:  .LBB74_2:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    addi s0, sp, 64
+; CHECK-NEXT:    addi sp, sp, -80
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    addi s0, sp, 80
 ; CHECK-NEXT:    .cfi_def_cfa s0, 0
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 4
@@ -873,8 +883,10 @@
 ; CHECK-NEXT:    add a1, a2, a1
 ; CHECK-NEXT:    vs8r.v v16, (a1)
 ; CHECK-NEXT:    ld a0, 0(a0)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
+; CHECK-NEXT:    addi sp, s0, -80
+; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 80
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 16 x i64> %v, i32 %idx
   ret i64 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX2
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX2
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX2
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,LMULMAX1
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX2
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v --riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -early-live-intervals -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,LMULMAX1
 
 define <vscale x 8 x i32> @insert_nxv8i32_v2i32_0(<vscale x 8 x i32> %vec, <2 x i32>* %svp) {
 ; CHECK-LABEL: insert_nxv8i32_v2i32_0:
@@ -557,32 +557,71 @@
 ; Check we don't mistakenly optimize this: we don't know whether this is
 ; inserted into the low or high split vector.
 define void @insert_v2i64_nxv16i64_hi(<2 x i64>* %psv, <vscale x 16 x i64>* %out) {
-; CHECK-LABEL: insert_v2i64_nxv16i64_hi:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    addi s0, sp, 64
-; CHECK-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 4
-; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    andi sp, sp, -64
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    addi a0, sp, 128
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    addi a2, sp, 64
-; CHECK-NEXT:    add a3, a2, a0
-; CHECK-NEXT:    vl8re64.v v8, (a3)
-; CHECK-NEXT:    vl8re64.v v16, (a2)
-; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    vs8r.v v8, (a0)
-; CHECK-NEXT:    vs8r.v v16, (a1)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
-; CHECK-NEXT:    ret
+; RV32-LABEL: insert_v2i64_nxv16i64_hi:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -80
+; RV32-NEXT:    .cfi_def_cfa_offset 80
+; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 80
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 4
+; RV32-NEXT:    sub sp, sp, a2
+; RV32-NEXT:    andi sp, sp, -64
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi a0, sp, 128
+; RV32-NEXT:    vse64.v v8, (a0)
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    addi a2, sp, 64
+; RV32-NEXT:    add a3, a2, a0
+; RV32-NEXT:    vl8re64.v v8, (a3)
+; RV32-NEXT:    vl8re64.v v16, (a2)
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    vs8r.v v8, (a0)
+; RV32-NEXT:    vs8r.v v16, (a1)
+; RV32-NEXT:    addi sp, s0, -80
+; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 80
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: insert_v2i64_nxv16i64_hi:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -80
+; RV64-NEXT:    .cfi_def_cfa_offset 80
+; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 80
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    csrr a2, vlenb
+; RV64-NEXT:    slli a2, a2, 4
+; RV64-NEXT:    sub sp, sp, a2
+; RV64-NEXT:    andi sp, sp, -64
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi a0, sp, 128
+; RV64-NEXT:    vse64.v v8, (a0)
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    addi a2, sp, 64
+; RV64-NEXT:    add a3, a2, a0
+; RV64-NEXT:    vl8re64.v v8, (a3)
+; RV64-NEXT:    vl8re64.v v16, (a2)
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    vs8r.v v8, (a0)
+; RV64-NEXT:    vs8r.v v16, (a1)
+; RV64-NEXT:    addi sp, s0, -80
+; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 80
+; RV64-NEXT:    ret
   %sv = load <2 x i64>, <2 x i64>* %psv
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 8)
   store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -1668,35 +1668,221 @@
 }
 
 define <vscale x 12 x i64> @reverse_nxv12i64(<vscale x 12 x i64> %a) {
-; CHECK-LABEL: reverse_nxv12i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    addi s0, sp, 64
-; CHECK-NEXT:    .cfi_def_cfa s0, 0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
-; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    andi sp, sp, -64
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a1, a0, -1
-; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vid.v v24
-; CHECK-NEXT:    vrsub.vx v24, v24, a1
-; CHECK-NEXT:    vrgather.vv v0, v16, v24
-; CHECK-NEXT:    vmv4r.v v16, v4
-; CHECK-NEXT:    vrgather.vv v0, v8, v24
-; CHECK-NEXT:    vmv4r.v v20, v0
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    addi a1, sp, 64
-; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    vs4r.v v4, (a0)
-; CHECK-NEXT:    vs8r.v v16, (a1)
-; CHECK-NEXT:    vl8re64.v v16, (a0)
-; CHECK-NEXT:    vl8re64.v v8, (a1)
-; CHECK-NEXT:    addi sp, s0, -64
-; CHECK-NEXT:    addi sp, sp, 64
-; CHECK-NEXT:    ret
+; RV32-BITS-UNKNOWN-LABEL: reverse_nxv12i64:
+; RV32-BITS-UNKNOWN:       # %bb.0:
+; RV32-BITS-UNKNOWN-NEXT:    addi sp, sp, -80
+; RV32-BITS-UNKNOWN-NEXT:    .cfi_def_cfa_offset 80
+; RV32-BITS-UNKNOWN-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-BITS-UNKNOWN-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-BITS-UNKNOWN-NEXT:    .cfi_offset ra, -4
+; RV32-BITS-UNKNOWN-NEXT:    .cfi_offset s0, -8
+; RV32-BITS-UNKNOWN-NEXT:    addi s0, sp, 80
+; RV32-BITS-UNKNOWN-NEXT:    .cfi_def_cfa s0, 0
+; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
+; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 4
+; RV32-BITS-UNKNOWN-NEXT:    sub sp, sp, a0
+; RV32-BITS-UNKNOWN-NEXT:    andi sp, sp, -64
+; RV32-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
+; RV32-BITS-UNKNOWN-NEXT:    addi a1, a0, -1
+; RV32-BITS-UNKNOWN-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-BITS-UNKNOWN-NEXT:    vid.v v24
+; RV32-BITS-UNKNOWN-NEXT:    vrsub.vx v24, v24, a1
+; RV32-BITS-UNKNOWN-NEXT:    vrgather.vv v0, v16, v24
+; RV32-BITS-UNKNOWN-NEXT:    vmv4r.v v16, v4
+; RV32-BITS-UNKNOWN-NEXT:    vrgather.vv v0, v8, v24
+; RV32-BITS-UNKNOWN-NEXT:    vmv4r.v v20, v0
+; RV32-BITS-UNKNOWN-NEXT:    slli a0, a0, 3
+; RV32-BITS-UNKNOWN-NEXT:    addi a1, sp, 64
+; RV32-BITS-UNKNOWN-NEXT:    add a0, a1, a0
+; RV32-BITS-UNKNOWN-NEXT:    vs4r.v v4, (a0)
+; RV32-BITS-UNKNOWN-NEXT:    vs8r.v v16, (a1)
+; RV32-BITS-UNKNOWN-NEXT:    vl8re64.v v16, (a0)
+; RV32-BITS-UNKNOWN-NEXT:    vl8re64.v v8, (a1)
+; RV32-BITS-UNKNOWN-NEXT:    addi sp, s0, -80
+; RV32-BITS-UNKNOWN-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-BITS-UNKNOWN-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-BITS-UNKNOWN-NEXT:    addi sp, sp, 80
+; RV32-BITS-UNKNOWN-NEXT:    ret
+;
+; RV32-BITS-256-LABEL: reverse_nxv12i64:
+; RV32-BITS-256:       # %bb.0:
+; RV32-BITS-256-NEXT:    addi sp, sp, -80
+; RV32-BITS-256-NEXT:    .cfi_def_cfa_offset 80
+; RV32-BITS-256-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-BITS-256-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-BITS-256-NEXT:    .cfi_offset ra, -4
+; RV32-BITS-256-NEXT:    .cfi_offset s0, -8
+; RV32-BITS-256-NEXT:    addi s0, sp, 80
+; RV32-BITS-256-NEXT:    .cfi_def_cfa s0, 0
+; RV32-BITS-256-NEXT:    csrr a0, vlenb
+; RV32-BITS-256-NEXT:    slli a0, a0, 4
+; RV32-BITS-256-NEXT:    sub sp, sp, a0
+; RV32-BITS-256-NEXT:    andi sp, sp, -64
+; RV32-BITS-256-NEXT:    csrr a0, vlenb
+; RV32-BITS-256-NEXT:    addi a1, a0, -1
+; RV32-BITS-256-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-BITS-256-NEXT:    vid.v v24
+; RV32-BITS-256-NEXT:    vrsub.vx v24, v24, a1
+; RV32-BITS-256-NEXT:    vrgather.vv v0, v16, v24
+; RV32-BITS-256-NEXT:    vmv4r.v v16, v4
+; RV32-BITS-256-NEXT:    vrgather.vv v0, v8, v24
+; RV32-BITS-256-NEXT:    vmv4r.v v20, v0
+; RV32-BITS-256-NEXT:    slli a0, a0, 3
+; RV32-BITS-256-NEXT:    addi a1, sp, 64
+; RV32-BITS-256-NEXT:    add a0, a1, a0
+; RV32-BITS-256-NEXT:    vs4r.v v4, (a0)
+; RV32-BITS-256-NEXT:    vs8r.v v16, (a1)
+; RV32-BITS-256-NEXT:    vl8re64.v v16, (a0)
+; RV32-BITS-256-NEXT:    vl8re64.v v8, (a1)
+; RV32-BITS-256-NEXT:    addi sp, s0, -80
+; RV32-BITS-256-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-BITS-256-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-BITS-256-NEXT:    addi sp, sp, 80
+; RV32-BITS-256-NEXT:    ret
+;
+; RV32-BITS-512-LABEL: reverse_nxv12i64:
+; RV32-BITS-512:       # %bb.0:
+; RV32-BITS-512-NEXT:    addi sp, sp, -80
+; RV32-BITS-512-NEXT:    .cfi_def_cfa_offset 80
+; RV32-BITS-512-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-BITS-512-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-BITS-512-NEXT:    .cfi_offset ra, -4
+; RV32-BITS-512-NEXT:    .cfi_offset s0, -8
+; RV32-BITS-512-NEXT:    addi s0, sp, 80
+; RV32-BITS-512-NEXT:    .cfi_def_cfa s0, 0
+; RV32-BITS-512-NEXT:    csrr a0, vlenb
+; RV32-BITS-512-NEXT:    slli a0, a0, 4
+; RV32-BITS-512-NEXT:    sub sp, sp, a0
+; RV32-BITS-512-NEXT:    andi sp, sp, -64
+; RV32-BITS-512-NEXT:    csrr a0, vlenb
+; RV32-BITS-512-NEXT:    addi a1, a0, -1
+; RV32-BITS-512-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV32-BITS-512-NEXT:    vid.v v24
+; RV32-BITS-512-NEXT:    vrsub.vx v24, v24, a1
+; RV32-BITS-512-NEXT:    vrgather.vv v0, v16, v24
+; RV32-BITS-512-NEXT:    vmv4r.v v16, v4
+; RV32-BITS-512-NEXT:    vrgather.vv v0, v8, v24
+; RV32-BITS-512-NEXT:    vmv4r.v v20, v0
+; RV32-BITS-512-NEXT:    slli a0, a0, 3
+; RV32-BITS-512-NEXT:    addi a1, sp, 64
+; RV32-BITS-512-NEXT:    add a0, a1, a0
+; RV32-BITS-512-NEXT:    vs4r.v v4, (a0)
+; RV32-BITS-512-NEXT:    vs8r.v v16, (a1)
+; RV32-BITS-512-NEXT:    vl8re64.v v16, (a0)
+; RV32-BITS-512-NEXT:    vl8re64.v v8, (a1)
+; RV32-BITS-512-NEXT:    addi sp, s0, -80
+; RV32-BITS-512-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-BITS-512-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-BITS-512-NEXT:    addi sp, sp, 80
+; RV32-BITS-512-NEXT:    ret
+;
+; RV64-BITS-UNKNOWN-LABEL: reverse_nxv12i64:
+; RV64-BITS-UNKNOWN:       # %bb.0:
+; RV64-BITS-UNKNOWN-NEXT:    addi sp, sp, -80
+; RV64-BITS-UNKNOWN-NEXT:    .cfi_def_cfa_offset 80
+; RV64-BITS-UNKNOWN-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-BITS-UNKNOWN-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-BITS-UNKNOWN-NEXT:    .cfi_offset ra, -8
+; RV64-BITS-UNKNOWN-NEXT:    .cfi_offset s0, -16
+; RV64-BITS-UNKNOWN-NEXT:    addi s0, sp, 80
+; RV64-BITS-UNKNOWN-NEXT:    .cfi_def_cfa s0, 0
+; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
+; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 4
+; RV64-BITS-UNKNOWN-NEXT:    sub sp, sp, a0
+; RV64-BITS-UNKNOWN-NEXT:    andi sp, sp, -64
+; RV64-BITS-UNKNOWN-NEXT:    csrr a0, vlenb
+; RV64-BITS-UNKNOWN-NEXT:    addi a1, a0, -1
+; RV64-BITS-UNKNOWN-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV64-BITS-UNKNOWN-NEXT:    vid.v v24
+; RV64-BITS-UNKNOWN-NEXT:    vrsub.vx v24, v24, a1
+; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v0, v16, v24
+; RV64-BITS-UNKNOWN-NEXT:    vmv4r.v v16, v4
+; RV64-BITS-UNKNOWN-NEXT:    vrgather.vv v0, v8, v24
+; RV64-BITS-UNKNOWN-NEXT:    vmv4r.v v20, v0
+; RV64-BITS-UNKNOWN-NEXT:    slli a0, a0, 3
+; RV64-BITS-UNKNOWN-NEXT:    addi a1, sp, 64
+; RV64-BITS-UNKNOWN-NEXT:    add a0, a1, a0
+; RV64-BITS-UNKNOWN-NEXT:    vs4r.v v4, (a0)
+; RV64-BITS-UNKNOWN-NEXT:    vs8r.v v16, (a1)
+; RV64-BITS-UNKNOWN-NEXT:    vl8re64.v v16, (a0)
+; RV64-BITS-UNKNOWN-NEXT:    vl8re64.v v8, (a1)
+; RV64-BITS-UNKNOWN-NEXT:    addi sp, s0, -80
+; RV64-BITS-UNKNOWN-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-BITS-UNKNOWN-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-BITS-UNKNOWN-NEXT:    addi sp, sp, 80
+; RV64-BITS-UNKNOWN-NEXT:    ret
+;
+; RV64-BITS-256-LABEL: reverse_nxv12i64:
+; RV64-BITS-256:       # %bb.0:
+; RV64-BITS-256-NEXT:    addi sp, sp, -80
+; RV64-BITS-256-NEXT:    .cfi_def_cfa_offset 80
+; RV64-BITS-256-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-BITS-256-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-BITS-256-NEXT:    .cfi_offset ra, -8
+; RV64-BITS-256-NEXT:    .cfi_offset s0, -16
+; RV64-BITS-256-NEXT:    addi s0, sp, 80
+; RV64-BITS-256-NEXT:    .cfi_def_cfa s0, 0
+; RV64-BITS-256-NEXT:    csrr a0, vlenb
+; RV64-BITS-256-NEXT:    slli a0, a0, 4
+; RV64-BITS-256-NEXT:    sub sp, sp, a0
+; RV64-BITS-256-NEXT:    andi sp, sp, -64
+; RV64-BITS-256-NEXT:    csrr a0, vlenb
+; RV64-BITS-256-NEXT:    addi a1, a0, -1
+; RV64-BITS-256-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV64-BITS-256-NEXT:    vid.v v24
+; RV64-BITS-256-NEXT:    vrsub.vx v24, v24, a1
+; RV64-BITS-256-NEXT:    vrgather.vv v0, v16, v24
+; RV64-BITS-256-NEXT:    vmv4r.v v16, v4
+; RV64-BITS-256-NEXT:    vrgather.vv v0, v8, v24
+; RV64-BITS-256-NEXT:    vmv4r.v v20, v0
+; RV64-BITS-256-NEXT:    slli a0, a0, 3
+; RV64-BITS-256-NEXT:    addi a1, sp, 64
+; RV64-BITS-256-NEXT:    add a0, a1, a0
+; RV64-BITS-256-NEXT:    vs4r.v v4, (a0)
+; RV64-BITS-256-NEXT:    vs8r.v v16, (a1)
+; RV64-BITS-256-NEXT:    vl8re64.v v16, (a0)
+; RV64-BITS-256-NEXT:    vl8re64.v v8, (a1)
+; RV64-BITS-256-NEXT:    addi sp, s0, -80
+; RV64-BITS-256-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-BITS-256-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-BITS-256-NEXT:    addi sp, sp, 80
+; RV64-BITS-256-NEXT:    ret
+;
+; RV64-BITS-512-LABEL: reverse_nxv12i64:
+; RV64-BITS-512:       # %bb.0:
+; RV64-BITS-512-NEXT:    addi sp, sp, -80
+; RV64-BITS-512-NEXT:    .cfi_def_cfa_offset 80
+; RV64-BITS-512-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; RV64-BITS-512-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+; RV64-BITS-512-NEXT:    .cfi_offset ra, -8
+; RV64-BITS-512-NEXT:    .cfi_offset s0, -16
+; RV64-BITS-512-NEXT:    addi s0, sp, 80
+; RV64-BITS-512-NEXT:    .cfi_def_cfa s0, 0
+; RV64-BITS-512-NEXT:    csrr a0, vlenb
+; RV64-BITS-512-NEXT:    slli a0, a0, 4
+; RV64-BITS-512-NEXT:    sub sp, sp, a0
+; RV64-BITS-512-NEXT:    andi sp, sp, -64
+; RV64-BITS-512-NEXT:    csrr a0, vlenb
+; RV64-BITS-512-NEXT:    addi a1, a0, -1
+; RV64-BITS-512-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
+; RV64-BITS-512-NEXT:    vid.v v24
+; RV64-BITS-512-NEXT:    vrsub.vx v24, v24, a1
+; RV64-BITS-512-NEXT:    vrgather.vv v0, v16, v24
+; RV64-BITS-512-NEXT:    vmv4r.v v16, v4
+; RV64-BITS-512-NEXT:    vrgather.vv v0, v8, v24
+; RV64-BITS-512-NEXT:    vmv4r.v v20, v0
+; RV64-BITS-512-NEXT:    slli a0, a0, 3
+; RV64-BITS-512-NEXT:    addi a1, sp, 64
+; RV64-BITS-512-NEXT:    add a0, a1, a0
+; RV64-BITS-512-NEXT:    vs4r.v v4, (a0)
+; RV64-BITS-512-NEXT:    vs8r.v v16, (a1)
+; RV64-BITS-512-NEXT:    vl8re64.v v16, (a0)
+; RV64-BITS-512-NEXT:    vl8re64.v v8, (a1)
+; RV64-BITS-512-NEXT:    addi sp, s0, -80
+; RV64-BITS-512-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+; RV64-BITS-512-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+; RV64-BITS-512-NEXT:    addi sp, sp, 80
+; RV64-BITS-512-NEXT:    ret
   %res = call <vscale x 12 x i64> @llvm.experimental.vector.reverse.nxv12i64(<vscale x 12 x i64> %a)
   ret <vscale x 12 x i64> %res
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -22,17 +22,22 @@
 define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %x) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -80
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
-; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    addi sp, sp, -96
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
+; CHECK-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 72(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_offset ra, -8
-; CHECK-NEXT:    addi s0, sp, 80
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    addi s0, sp, 96
 ; CHECK-NEXT:    .cfi_def_cfa s0, 0
 ; CHECK-NEXT:    csrr t0, vlenb
 ; CHECK-NEXT:    slli t0, t0, 4
 ; CHECK-NEXT:    sub sp, sp, t0
 ; CHECK-NEXT:    andi sp, sp, -64
 ; CHECK-NEXT:    mv s1, sp
+; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    addi t0, s1, 64
 ; CHECK-NEXT:    vs8r.v v8, (t0)
 ; CHECK-NEXT:    csrr t1, vlenb
@@ -44,9 +49,12 @@
 ; CHECK-NEXT:    sd t1, 0(sp)
 ; CHECK-NEXT:    vmv8r.v v16, v8
 ; CHECK-NEXT:    call bar@plt
-; CHECK-NEXT:    addi sp, s0, -80
-; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 80
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    addi sp, s0, -96
+; CHECK-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 96
 ; CHECK-NEXT:    ret
   %ret = call <vscale x 16 x i32> @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x)
   ret <vscale x 16 x i32> %ret