Index: include/llvm/IR/Intrinsics.td
===================================================================
--- include/llvm/IR/Intrinsics.td
+++ include/llvm/IR/Intrinsics.td
@@ -987,6 +987,10 @@
 
 //===--------- Intrinsics that are used with scalable vector types --------===//
 
+def int_experimental_vector_stepvector : Intrinsic<[llvm_anyvector_ty],
+                                                   [],
+                                                   [IntrNoMem]>;
+
 def int_experimental_vector_vscale : Intrinsic<[llvm_anyint_ty],
                                                [],
                                                [IntrNoMem]>;
Index: lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.h
+++ lib/Target/AArch64/AArch64ISelLowering.h
@@ -198,6 +198,10 @@
   /// multiple of MVT.getVectorNumElements().
   VSCALE,
 
+  /// SERIES_VECTOR(INITIAL, STEP) - Creates a vector with the first lane
+  /// containing INITIAL and each subsequent lane incremented by STEP.
+  SERIES_VECTOR,
+
   // NEON Load/Store with post-increment base updates
   LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
   LD3post,
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1219,6 +1219,7 @@
   case AArch64ISD::FRSQRTE:       return "AArch64ISD::FRSQRTE";
   case AArch64ISD::FRSQRTS:       return "AArch64ISD::FRSQRTS";
   case AArch64ISD::VSCALE:        return "AArch64ISD::VSCALE";
+  case AArch64ISD::SERIES_VECTOR: return "AArch64ISD::SERIES_VECTOR";
   }
   return nullptr;
 }
@@ -2668,6 +2669,19 @@
     return VScale;
   }
+  case Intrinsic::experimental_vector_stepvector: {
+    auto ScalarVT = VT.getScalarType();
+    assert(ScalarVT.isInteger() &&
+           "Attempted to get a floating point stepvector");
+    // Legalize the input types; anything smaller than i64 uses the
+    // 32-bit w registers instead.
+    if (ScalarVT != MVT::i64)
+      ScalarVT = MVT::i32;
+    auto SeriesVec = DAG.getNode(AArch64ISD::SERIES_VECTOR, dl, VT,
+                                 DAG.getConstant(0, dl, ScalarVT),
+                                 DAG.getConstant(1, dl, ScalarVT));
+    return SeriesVec;
+  }
   }
 }
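Note: the lowering above always builds SERIES_VECTOR(0, 1), i.e. the vector of
lane indices 0, 1, 2, ... For context, here is a minimal IR sketch of how a
vectorizer might consume the new intrinsic (not part of this patch; @vec_iv
and %iv are illustrative names): the lane indices are added to a splat of the
scalar induction variable to form a vector induction variable.

; Sketch only -- assumes the intrinsic exactly as defined in this patch.
define <vscale x 4 x i32> @vec_iv(i32 %iv) {
  ; Lane indices 0, 1, 2, ...
  %step = call <vscale x 4 x i32> @llvm.experimental.vector.stepvector.nxv4i32()
  ; Splat the scalar induction variable across all lanes.
  %head = insertelement <vscale x 4 x i32> undef, i32 %iv, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef,
                         <vscale x 4 x i32> zeroinitializer
  ; Per-lane induction values: iv, iv+1, iv+2, ...
  %vec.iv = add <vscale x 4 x i32> %splat, %step
  ret <vscale x 4 x i32> %vec.iv
}
declare <vscale x 4 x i32> @llvm.experimental.vector.stepvector.nxv4i32()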
Index: lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -12,7 +12,12 @@
 //===----------------------------------------------------------------------===//
 
 def SDT_AArch64VScale : SDTypeProfile<1, 1, [SDTCisVT<0,i64>, SDTCisInt<1>]>;
+def SDT_AArch64SeriesVector : SDTypeProfile<1, 2, [
+  SDTCisVec<0>, SDTCisSameAs<1,2>, SDTCisInt<2>
+]>;
 def AArch64vscale : SDNode<"AArch64ISD::VSCALE", SDT_AArch64VScale>;
+def AArch64seriesvec : SDNode<"AArch64ISD::SERIES_VECTOR", SDT_AArch64SeriesVector>;
+
 let Predicates = [HasSVE] in {
   defm ADD_ZZZ : sve_int_bin_cons_arit_0<0b000, "add">;
   defm SUB_ZZZ : sve_int_bin_cons_arit_0<0b001, "sub">;
@@ -486,4 +491,15 @@
 
   // Unoptimized vscale
   def : Pat<(AArch64vscale (simm6_32b:$imm)), (RDVLI_XI $imm)>;
+
+  // Unoptimized seriesvector patterns
+  def : Pat<(nxv16i8 (AArch64seriesvec GPR32:$start, GPR32:$step)),
+            (INDEX_RR_B $start, $step)>;
+  def : Pat<(nxv8i16 (AArch64seriesvec GPR32:$start, GPR32:$step)),
+            (INDEX_RR_H $start, $step)>;
+  def : Pat<(nxv4i32 (AArch64seriesvec GPR32:$start, GPR32:$step)),
+            (INDEX_RR_S $start, $step)>;
+  def : Pat<(nxv2i64 (AArch64seriesvec GPR64:$start, GPR64:$step)),
+            (INDEX_RR_D $start, $step)>;
+
 }
Index: test/CodeGen/AArch64/SVE/index.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/SVE/index.ll
@@ -0,0 +1,45 @@
+; RUN: llc -verify-machineinstrs -mattr=+sve < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnueabi"
+
+declare <vscale x 16 x i8> @llvm.experimental.vector.stepvector.nxv16i8()
+declare <vscale x 8 x i16> @llvm.experimental.vector.stepvector.nxv8i16()
+declare <vscale x 4 x i32> @llvm.experimental.vector.stepvector.nxv4i32()
+declare <vscale x 2 x i64> @llvm.experimental.vector.stepvector.nxv2i64()
+
+define <vscale x 16 x i8> @index_b() {
+; CHECK-LABEL: index_b:
+; CHECK: orr [[STEP:w[0-9]+]], wzr, #0x1
+; CHECK: index z0.b, wzr, [[STEP]]
+; CHECK: ret
+  %ident = call <vscale x 16 x i8> @llvm.experimental.vector.stepvector.nxv16i8()
+  ret <vscale x 16 x i8> %ident
+}
+
+define <vscale x 8 x i16> @index_h() {
+; CHECK-LABEL: index_h:
+; CHECK: orr [[STEP:w[0-9]+]], wzr, #0x1
+; CHECK: index z0.h, wzr, [[STEP]]
+; CHECK: ret
+  %ident = call <vscale x 8 x i16> @llvm.experimental.vector.stepvector.nxv8i16()
+  ret <vscale x 8 x i16> %ident
+}
+
+define <vscale x 4 x i32> @index_s() {
+; CHECK-LABEL: index_s:
+; CHECK: orr [[STEP:w[0-9]+]], wzr, #0x1
+; CHECK: index z0.s, wzr, [[STEP]]
+; CHECK: ret
+  %ident = call <vscale x 4 x i32> @llvm.experimental.vector.stepvector.nxv4i32()
+  ret <vscale x 4 x i32> %ident
+}
+
+define <vscale x 2 x i64> @index_d() {
+; CHECK-LABEL: index_d:
+; CHECK: orr w[[STEP:[0-9]+]], wzr, #0x1
+; CHECK: index z0.d, xzr, x[[STEP]]
+; CHECK: ret
+  %ident = call <vscale x 2 x i64> @llvm.experimental.vector.stepvector.nxv2i64()
+  ret <vscale x 2 x i64> %ident
+}
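The RUN line above shows how to reproduce the selection by hand, assuming an
llc built with SVE support; the scratch register (w8 below) is whatever the
register allocator happens to pick:

$ llc -verify-machineinstrs -mattr=+sve test/CodeGen/AArch64/SVE/index.ll -o -
...
index_s:
        orr     w8, wzr, #0x1
        index   z0.s, wzr, w8
        ret
...

Because the lowering always feeds the constants 0 and 1 into the register
operands of INDEX_RR, every test materializes the step with an orr first
(the start reuses wzr/xzr directly); hence the "Unoptimized seriesvector
patterns" comment above.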