diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -899,6 +899,7 @@ bool isExtFreeImpl(const Instruction *Ext) const override; void addTypeForNEON(MVT VT); + void addTypeForStreamingSVE(MVT VT); void addTypeForFixedLengthSVE(MVT VT); void addDRTypeForNEON(MVT VT); void addQRTypeForNEON(MVT VT); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1391,6 +1391,16 @@ for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64}) setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); + if (Subtarget->forceStreamingCompatibleSVE()) { + for (MVT VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, + MVT::v4i32, MVT::v2i64}) + addTypeForStreamingSVE(VT); + + for (MVT VT : + {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64}) + addTypeForStreamingSVE(VT); + } + // NOTE: Currently this has to happen after computeRegisterProperties rather // than the preferred option of combining it with the addRegisterClass call. if (Subtarget->useSVEForFixedLengthVectors()) { @@ -1597,6 +1607,16 @@ return false; } +void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) { + if (VT.isInteger()) { + setOperationAction(ISD::ANY_EXTEND, VT, Custom); + setOperationAction(ISD::ZERO_EXTEND, VT, Custom); + setOperationAction(ISD::SIGN_EXTEND, VT, Custom); + } + setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); + setOperationAction(ISD::AND, VT, Custom); +} + void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) { assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); @@ -5773,8 +5793,7 @@ case ISD::MLOAD: return LowerMLOAD(Op, DAG); case ISD::LOAD: - if (useSVEForFixedLengthVectorVT(Op.getValueType(), - Subtarget->forceStreamingCompatibleSVE())) + if (useSVEForFixedLengthVectorVT(Op.getValueType())) return LowerFixedLengthVectorLoadToSVE(Op, DAG); return LowerLOAD(Op, DAG); case ISD::ADD: @@ -11397,12 +11416,17 @@ } // Try 32-bit splatted SIMD immediate. -static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG, - const APInt &Bits, - const SDValue *LHS = nullptr) { +static SDValue +tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG, + const APInt &Bits, const SDValue *LHS = nullptr, + const AArch64Subtarget *const Subtarget = nullptr) { + EVT VT = Op.getValueType(); + if (Subtarget && VT.isFixedLengthVector() && + Subtarget->forceStreamingCompatibleSVE()) + return SDValue(); + if (Bits.getHiBits(64) == Bits.getLoBits(64)) { uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); - EVT VT = Op.getValueType(); MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; bool isAdvSIMDModImm = false; uint64_t Shift; @@ -11445,12 +11469,17 @@ } // Try 16-bit splatted SIMD immediate. 
-static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG, - const APInt &Bits, - const SDValue *LHS = nullptr) { +static SDValue +tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG, + const APInt &Bits, const SDValue *LHS = nullptr, + const AArch64Subtarget *const Subtarget = nullptr) { + EVT VT = Op.getValueType(); + if (Subtarget && VT.isFixedLengthVector() && + Subtarget->forceStreamingCompatibleSVE()) + return SDValue(); + if (Bits.getHiBits(64) == Bits.getLoBits(64)) { uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); - EVT VT = Op.getValueType(); MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; bool isAdvSIMDModImm = false; uint64_t Shift; @@ -12128,7 +12157,8 @@ SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { - if (useSVEForFixedLengthVectorVT(Op.getValueType())) + if (useSVEForFixedLengthVectorVT(Op.getValueType(), + Subtarget->forceStreamingCompatibleSVE())) return LowerFixedLengthConcatVectorsToSVE(Op, DAG); assert(Op.getValueType().isScalableVector() && @@ -12234,7 +12264,8 @@ return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType()); } - if (useSVEForFixedLengthVectorVT(VT)) + if (useSVEForFixedLengthVectorVT(VT, + Subtarget->forceStreamingCompatibleSVE())) return LowerFixedLengthExtractVectorElt(Op, DAG); // Check for non-constant or out of range lane. @@ -12296,10 +12327,11 @@ // If this is extracting the upper 64-bits of a 128-bit vector, we match // that directly. if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 && - InVT.getSizeInBits() == 128) + InVT.getSizeInBits() == 128 && !Subtarget->forceStreamingCompatibleSVE()) return Op; - if (useSVEForFixedLengthVectorVT(InVT)) { + if (useSVEForFixedLengthVectorVT(InVT, + Subtarget->forceStreamingCompatibleSVE())) { SDLoc DL(Op); EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); @@ -12487,7 +12519,8 @@ bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { // Currently no fixed length shuffles that require SVE are legal. - if (useSVEForFixedLengthVectorVT(VT)) + if (useSVEForFixedLengthVectorVT(VT, + Subtarget->forceStreamingCompatibleSVE())) return false; if (VT.getVectorNumElements() == 4 && @@ -12597,7 +12630,9 @@ switch (Op.getOpcode()) { case ISD::SHL: - if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) + if (VT.isScalableVector() || + useSVEForFixedLengthVectorVT(VT, + Subtarget->forceStreamingCompatibleSVE())) return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED); if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) @@ -12609,7 +12644,9 @@ Op.getOperand(0), Op.getOperand(1)); case ISD::SRA: case ISD::SRL: - if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) { + if (VT.isScalableVector() || + useSVEForFixedLengthVectorVT( + VT, Subtarget->forceStreamingCompatibleSVE())) { unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED : AArch64ISD::SRL_PRED; return LowerToPredicatedOp(Op, DAG, Opc); @@ -14008,6 +14045,12 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const { + // Skip this lowering if streaming-compatible SVE is enabled, because it + // generates code that is invalid in streaming mode when the SVE vector + // length is not specified.
+ if (Subtarget->forceStreamingCompatibleSVE()) + return false; + assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && "Invalid interleave factor"); @@ -15772,7 +15815,8 @@ } static SDValue performANDCombine(SDNode *N, - TargetLowering::DAGCombinerInfo &DCI) { + TargetLowering::DAGCombinerInfo &DCI, + const AArch64Subtarget *const Subtarget) { SelectionDAG &DAG = DCI.DAG; SDValue LHS = N->getOperand(0); SDValue RHS = N->getOperand(1); @@ -15807,16 +15851,16 @@ DefBits = ~DefBits; if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG, - DefBits, &LHS)) || + DefBits, &LHS, Subtarget)) || (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG, - DefBits, &LHS))) + DefBits, &LHS, Subtarget))) return NewOp; UndefBits = ~UndefBits; if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG, - UndefBits, &LHS)) || + UndefBits, &LHS, Subtarget)) || (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG, - UndefBits, &LHS))) + UndefBits, &LHS, Subtarget))) return NewOp; } @@ -20610,7 +20654,7 @@ case ISD::OR: return performORCombine(N, DCI, Subtarget); case ISD::AND: - return performANDCombine(N, DCI); + return performANDCombine(N, DCI, Subtarget); case ISD::INTRINSIC_WO_CHAIN: return performIntrinsicCombine(N, DCI, Subtarget); case ISD::ANY_EXTEND: @@ -22417,7 +22461,7 @@ SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - assert(useSVEForFixedLengthVectorVT(VT) && + assert(VT.isFixedLengthVector() && isTypeLegal(VT) && "Only expected to lower fixed length vector operation!"); EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); @@ -22433,7 +22477,8 @@ } // "cast" fixed length vector to a scalable vector. - assert(useSVEForFixedLengthVectorVT(V.getValueType()) && + assert(useSVEForFixedLengthVectorVT( + V.getValueType(), Subtarget->forceStreamingCompatibleSVE()) && "Only fixed length vectors are supported!"); Ops.push_back(convertToScalableVector(DAG, ContainerVT, V)); } diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -3032,7 +3032,7 @@ (EXTRACT_SUBREG (DUP_ZZI_D ZPR:$vec, sve_elm_idx_extdup_d:$index), dsub)>; // Extract element from vector with immediate index that's within the bottom 128-bits. - let AddedComplexity = 1 in { + let Predicates = [NotInStreamingSVEMode], AddedComplexity = 1 in { def : Pat<(i32 (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)), (i32 (UMOVvi8 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; def : Pat<(i32 (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index)), @@ -3042,7 +3042,7 @@ def : Pat<(i64 (vector_extract (nxv2i64 ZPR:$vec), VectorIndexD:$index)), (i64 (UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index))>; } - + let Predicates = [NotInStreamingSVEMode] in { def : Pat<(sext_inreg (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index), i8), (i32 (SMOVvi8to32 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; def : Pat<(sext_inreg (anyext (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)), i8), @@ -3055,7 +3055,7 @@ def : Pat<(sext (vector_extract (nxv4i32 ZPR:$vec), VectorIndexS:$index)), (i64 (SMOVvi32to64 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>; - + } // Extract first element from vector. 
let AddedComplexity = 2 in { def : Pat<(vector_extract (nxv16i8 ZPR:$Zs), (i64 0)), diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-fixed-length-int-shifts.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-fixed-length-int-shifts.ll @@ -0,0 +1,640 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; ASHR +; + +define <4 x i8> @ashr_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; CHECK-LABEL: ashr_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: adrp x9, .LCPI0_1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: ldr d3, [x9, :lo12:.LCPI0_1] +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <4 x i8> %op1, %op2 + ret <4 x i8> %res +} + +define <8 x i8> @ashr_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: ashr_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <8 x i8> %op1, %op2 + ret <8 x i8> %res +} + +define <16 x i8> @ashr_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: ashr_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <16 x i8> %op1, %op2 + ret <16 x i8> %res +} + +define void @ashr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: ashr_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: asr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: asr z1.b, p0/m, z1.b, z3.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = ashr <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @ashr_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 { +; CHECK-LABEL: ashr_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: adrp x9, .LCPI4_1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: ldr d3, [x9, :lo12:.LCPI4_1] +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <2 x i16> %op1, %op2 + ret <2 x i16> %res +} + +define <4 x i16> @ashr_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: ashr_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; 
CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <4 x i16> %op1, %op2 + ret <4 x i16> %res +} + +define <8 x i16> @ashr_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: ashr_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <8 x i16> %op1, %op2 + ret <8 x i16> %res +} + +define void @ashr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: ashr_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: asr z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = ashr <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @ashr_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: ashr_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <2 x i32> %op1, %op2 + ret <2 x i32> %res +} + +define <4 x i32> @ashr_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: ashr_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <4 x i32> %op1, %op2 + ret <4 x i32> %res +} + +define void @ashr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: ashr_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: asr z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = ashr <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @ashr_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +; CHECK-LABEL: ashr_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <1 x i64> %op1, %op2 + ret <1 x i64> %res +} + +define <2 x i64> @ashr_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: ashr_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = ashr <2 x i64> %op1, %op2 + ret <2 x i64> %res +} + +define void @ashr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: ashr_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: asr z0.d, p0/m, z0.d, 
z2.d +; CHECK-NEXT: asr z1.d, p0/m, z1.d, z3.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = ashr <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; LSHR +; + +define <4 x i8> @lshr_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; CHECK-LABEL: lshr_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <4 x i8> %op1, %op2 + ret <4 x i8> %res +} + +define <8 x i8> @lshr_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: lshr_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <8 x i8> %op1, %op2 + ret <8 x i8> %res +} + +define <16 x i8> @lshr_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: lshr_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <16 x i8> %op1, %op2 + ret <16 x i8> %res +} + +define void @lshr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: lshr_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: lsr z1.b, p0/m, z1.b, z3.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = lshr <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @lshr_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 { +; CHECK-LABEL: lshr_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI18_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI18_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <2 x i16> %op1, %op2 + ret <2 x i16> %res +} + +define <4 x i16> @lshr_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: lshr_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <4 x i16> %op1, %op2 + ret <4 x i16> %res +} + +define <8 x i16> @lshr_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: lshr_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: 
ret + %res = lshr <8 x i16> %op1, %op2 + ret <8 x i16> %res +} + +define void @lshr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: lshr_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = lshr <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @lshr_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: lshr_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <2 x i32> %op1, %op2 + ret <2 x i32> %res +} + +define <4 x i32> @lshr_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: lshr_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <4 x i32> %op1, %op2 + ret <4 x i32> %res +} + +define void @lshr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: lshr_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: lsr z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = lshr <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @lshr_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +; CHECK-LABEL: lshr_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <1 x i64> %op1, %op2 + ret <1 x i64> %res +} + +define <2 x i64> @lshr_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: lshr_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = lshr <2 x i64> %op1, %op2 + ret <2 x i64> %res +} + +define void @lshr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: lshr_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z3.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = lshr <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SHL +; + +define <2 x i8> @shl_v2i8(<2 x i8> %op1, <2 x i8> %op2) #0 { +; CHECK-LABEL: shl_v2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI28_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; 
CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI28_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shl <2 x i8> %op1, %op2 + ret <2 x i8> %res +} + +define <4 x i8> @shl_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; CHECK-LABEL: shl_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI29_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI29_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shl <4 x i8> %op1, %op2 + ret <4 x i8> %res +} + +define <8 x i8> @shl_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: shl_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shl <8 x i8> %op1, %op2 + ret <8 x i8> %res +} + +define <16 x i8> @shl_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: shl_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shl <16 x i8> %op1, %op2 + ret <16 x i8> %res +} + +define void @shl_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: shl_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: lsl z1.b, p0/m, z1.b, z3.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = shl <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <4 x i16> @shl_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: shl_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shl <4 x i16> %op1, %op2 + ret <4 x i16> %res +} + +define <8 x i16> @shl_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: shl_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shl <8 x i16> %op1, %op2 + ret <8 x i16> %res +} + +define void @shl_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: shl_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = shl <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @shl_v2i32(<2 x i32> %op1, <2 x 
i32> %op2) #0 { +; CHECK-LABEL: shl_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shl <2 x i32> %op1, %op2 + ret <2 x i32> %res +} + +define <4 x i32> @shl_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: shl_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shl <4 x i32> %op1, %op2 + ret <4 x i32> %res +} + +define void @shl_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: shl_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: lsl z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = shl <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @shl_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +; CHECK-LABEL: shl_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shl <1 x i64> %op1, %op2 + ret <1 x i64> %res +} + +define <2 x i64> @shl_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: shl_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shl <2 x i64> %op1, %op2 + ret <2 x i64> %res +} + +define void @shl_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: shl_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = shl <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll @@ -0,0 +1,138 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @build_vector_7_inc1_v4i1(ptr %a) #0 { +; CHECK-LABEL: build_vector_7_inc1_v4i1: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #5 +; CHECK-NEXT: strb w8, [x0] +; CHECK-NEXT: ret + store <4 x i1> <i1 true, i1 false, i1 true, i1 false>, ptr %a, align 1 + ret void +} + +define void @build_vector_7_inc1_v32i8(ptr %a) #0 { +; CHECK-LABEL: build_vector_7_inc1_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.b, #0, #1 +; CHECK-NEXT: mov z1.d, z0.d +; CHECK-NEXT: add z0.b, z0.b, #7 // =0x7 +; CHECK-NEXT: add z1.b, z1.b, #23 // =0x17 +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + store <32 x i8> <i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38>, ptr %a, align 1 + ret void +} + +define void @build_vector_0_inc2_v16i16(ptr %a) #0 { +; CHECK-LABEL: build_vector_0_inc2_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.h, #0, #2 +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add z0.h, z0.h, #16 // =0x10 +; CHECK-NEXT: str q0, [x0, #16] +; CHECK-NEXT: ret + store <16 x i16> <i16 0, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30>, ptr %a, align 2 + ret void +} + +; Negative const stride. +define void @build_vector_0_dec3_v8i32(ptr %a) #0 { +; CHECK-LABEL: build_vector_0_dec3_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.s, #0, #-3 +; CHECK-NEXT: mov z1.s, #-12 // =0xfffffffffffffff4 +; CHECK-NEXT: add z1.s, z0.s, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + store <8 x i32> <i32 0, i32 -3, i32 -6, i32 -9, i32 -12, i32 -15, i32 -18, i32 -21>, ptr %a, align 4 + ret void +} + +; Constant stride that's too big to be directly encoded into the index. +define void @build_vector_minus2_dec32_v4i64(ptr %a) #0 { +; CHECK-LABEL: build_vector_minus2_dec32_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov x8, #-32 +; CHECK-NEXT: mov z0.d, #-66 // =0xffffffffffffffbe +; CHECK-NEXT: mov z2.d, #-2 // =0xfffffffffffffffe +; CHECK-NEXT: index z1.d, #0, x8 +; CHECK-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEXT: add z1.d, z1.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + store <4 x i64> <i64 -2, i64 -34, i64 -66, i64 -98>, ptr %a, align 8 + ret void +} + +; Constant but not a sequence. +define void @build_vector_no_stride_v4i64(ptr %a) #0 { +; CHECK-LABEL: build_vector_no_stride_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: index z0.d, #1, #7 +; CHECK-NEXT: index z1.d, #0, #4 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + store <4 x i64> <i64 0, i64 4, i64 1, i64 8>, ptr %a, align 8 + ret void +} + +define void @build_vector_0_inc2_v16f16(ptr %a) #0 { +; CHECK-LABEL: build_vector_0_inc2_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI6_0 +; CHECK-NEXT: adrp x9, .LCPI6_1 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI6_0] +; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI6_1] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + store <16 x half> <half 0.0, half 2.0, half 4.0, half 6.0, half 8.0, half 10.0, half 12.0, half 14.0, half 16.0, half 18.0, half 20.0, half 22.0, half 24.0, half 26.0, half 28.0, half 30.0>, ptr %a, align 2 + ret void +} + +; Negative const stride. +define void @build_vector_0_dec3_v8f32(ptr %a) #0 { +; CHECK-LABEL: build_vector_0_dec3_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: adrp x9, .LCPI7_1 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI7_1] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + store <8 x float> <float 0.0, float -3.0, float -6.0, float -9.0, float -12.0, float -15.0, float -18.0, float -21.0>, ptr %a, align 4 + ret void +} + +; Constant stride that's too big to be directly encoded into the index. +define void @build_vector_minus2_dec32_v4f64(ptr %a) #0 { +; CHECK-LABEL: build_vector_minus2_dec32_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI8_0 +; CHECK-NEXT: adrp x9, .LCPI8_1 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI8_0] +; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI8_1] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + store <4 x double> <double -2.0, double -34.0, double -66.0, double -98.0>, ptr %a, align 8 + ret void +} + +; Constant but not a sequence. 
+define void @build_vector_no_stride_v4f64(ptr %a) #0 { +; CHECK-LABEL: build_vector_no_stride_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI9_0 +; CHECK-NEXT: adrp x9, .LCPI9_1 +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI9_0] +; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI9_1] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + store <4 x double> , ptr %a, align 8 + ret void +} + + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll @@ -0,0 +1,606 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; i8 +; + +define <8 x i8> @concat_v8i8(<4 x i8> %op1, <4 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z2.h, z1.h[3] +; CHECK-NEXT: mov z3.h, z1.h[2] +; CHECK-NEXT: mov z4.h, z1.h[1] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w8, [sp, #12] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strb w9, [sp, #8] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: mov z5.h, z0.h[2] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: strb w10, [sp, #15] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: strb w9, [sp, #13] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strb w10, [sp, #11] +; CHECK-NEXT: strb w8, [sp, #10] +; CHECK-NEXT: strb w9, [sp, #9] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %res = shufflevector <4 x i8> %op1, <4 x i8> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + ret <8 x i8> %res +} + +define <16 x i8> @concat_v16i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <8 x i8> %op1, <8 x i8> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + ret <16 x i8> %res +} + +define void @concat_v32i8(<16 x i8>* %a, <16 x i8>* %b, <32 x i8>* %c) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: splice z1.b, p0, z1.b, z0.b +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: st1b { z1.b }, p0, [x2] +; CHECK-NEXT: ret + %op1 = load <16 x i8>, <16 x i8>* %a + %op2 = load <16 x i8>, <16 x i8>* %b + %res = shufflevector <16 x i8> %op1, <16 x i8> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + store <32 x i8> %res, <32 x i8>* %c + ret void +} + +define void @concat_v64i8(<32 x i8>* %a, <32 x i8>* %b, <64 x i8>* %c) #0 { +; CHECK-LABEL: concat_v64i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: stp q0, q1, [x2, #32] +; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = shufflevector <32 x i8> %op1, <32 x i8> %op2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> + store <64 x i8> %res, <64 x i8>* %c + ret void +} + +; +; i16 +; + +define <4 x i16> @concat_v4i16(<2 x i16> %op1, <2 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: strh w9, [sp, #8] +; CHECK-NEXT: strh w10, [sp, #14] +; CHECK-NEXT: strh w11, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %res = shufflevector <2 x i16> %op1, <2 x i16> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i16> %res +} + +; Streaming-compatible mode uses SVE even for 128-bit vectors. +define <8 x i16> @concat_v8i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <4 x i16> %op1, <4 x i16> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + ret <8 x i16> %res +} + +define void @concat_v16i16(<8 x i16>* %a, <8 x i16>* %b, <16 x i16>* %c) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: splice z1.h, p0, z1.h, z0.h +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: st1h { z1.h }, p0, [x2] +; CHECK-NEXT: ret + %op1 = load <8 x i16>, <8 x i16>* %a + %op2 = load <8 x i16>, <8 x i16>* %b + %res = shufflevector <8 x i16> %op1, <8 x i16> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <16 x i16> %res, <16 x i16>* %c + ret void +} + +define void @concat_v32i16(<16 x i16>* %a, <16 x i16>* %b, <32 x i16>* %c) #0 { +; CHECK-LABEL: concat_v32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: stp q0, q1, [x2, #32] +; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = shufflevector <16 x i16> %op1, <16 x i16> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + store <32 x i16> %res, <32 x i16>* %c + ret void +} + +; +; i32 +; + +; Streaming-compatible mode uses SVE even for 64-bit vectors. +define <2 x i32> @concat_v2i32(<1 x i32> %op1, <1 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: zip1 z0.s, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <1 x i32> %op1, <1 x i32> %op2, <2 x i32> <i32 0, i32 1> + ret <2 x i32> %res +} + +; Streaming-compatible mode uses SVE even for 128-bit vectors. 
+define <4 x i32> @concat_v4i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <2 x i32> %op1, <2 x i32> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x i32> %res +} + +define void @concat_v8i32(<4 x i32>* %a, <4 x i32>* %b, <8 x i32>* %c) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: splice z1.s, p0, z1.s, z0.s +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: st1w { z1.s }, p0, [x2] +; CHECK-NEXT: ret + %op1 = load <4 x i32>, <4 x i32>* %a + %op2 = load <4 x i32>, <4 x i32>* %b + %res = shufflevector <4 x i32> %op1, <4 x i32> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i32> %res, <8 x i32>* %c + ret void +} + +define void @concat_v16i32(<8 x i32>* %a, <8 x i32>* %b, <16 x i32>* %c) #0 { +; CHECK-LABEL: concat_v16i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: stp q0, q1, [x2, #32] +; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = shufflevector <8 x i32> %op1, <8 x i32> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <16 x i32> %res, <16 x i32>* %c + ret void +} + +; +; i64 +; + +; Streaming-compatible mode uses SVE even for 128-bit vectors. +define <2 x i64> @concat_v2i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: splice z0.d, p0, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <1 x i64> %op1, <1 x i64> %op2, <2 x i32> <i32 0, i32 1> + ret <2 x i64> %res +} + +define void @concat_v4i64(<2 x i64>* %a, <2 x i64>* %b, <4 x i64>* %c) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: splice z1.d, p0, z1.d, z0.d +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: st1d { z1.d }, p0, [x2] +; CHECK-NEXT: ret + %op1 = load <2 x i64>, <2 x i64>* %a + %op2 = load <2 x i64>, <2 x i64>* %b + %res = shufflevector <2 x i64> %op1, <2 x i64> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i64> %res, <4 x i64>* %c + ret void +} + +define void @concat_v8i64(<4 x i64>* %a, <4 x i64>* %b, <8 x i64>* %c) #0 { +; CHECK-LABEL: concat_v8i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: stp q0, q1, [x2, #32] +; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = shufflevector <4 x i64> %op1, <4 x i64> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i64> %res, <8 x i64>* %c + ret void +} + +; +; f16 +; + +define <4 x half> @concat_v4f16(<2 x half> %op1, <2 x half> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: str h1, [sp, #12] +; CHECK-NEXT: str h0, [sp, #8] +; CHECK-NEXT: mov z1.h, z1.h[1] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: str h1, [sp, #14] +; CHECK-NEXT: str h0, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %res = shufflevector <2 x half> %op1, <2 x half> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x half> %res +} + +define <8 x half> @concat_v8f16(<4 x half> %op1, <4 x half> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <4 x half> %op1, <4 x half> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + ret <8 x half> %res +} + +define void @concat_v16f16(<8 x half>* %a, <8 x half>* %b, <16 x half>* %c) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: splice z1.h, p0, z1.h, z0.h +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: st1h { z1.h }, p0, [x2] +; CHECK-NEXT: ret + %op1 = load <8 x half>, <8 x half>* %a + %op2 = load <8 x half>, <8 x half>* %b + %res = shufflevector <8 x half> %op1, <8 x half> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <16 x half> %res, <16 x half>* %c + ret void +} + +define void @concat_v32f16(<16 x half>* %a, <16 x half>* %b, <32 x half>* %c) #0 { +; CHECK-LABEL: concat_v32f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: stp q0, q1, [x2, #32] +; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: ret + %op1 = load <16 x half>, <16 x half>* %a + %op2 = load <16 x half>, <16 x half>* %b + %res = shufflevector <16 x half> %op1, <16 x half> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + store <32 x half> %res, <32 x half>* %c + ret void +} + +; +; f32 +; + +; Streaming-compatible mode uses SVE even for 64-bit vectors. +define <2 x float> @concat_v2f32(<1 x float> %op1, <1 x float> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: zip1 z0.s, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <1 x float> %op1, <1 x float> %op2, <2 x i32> <i32 0, i32 1> + ret <2 x float> %res +} + +; Streaming-compatible mode uses SVE even for 128-bit vectors. 
+define <4 x float> @concat_v4f32(<2 x float> %op1, <2 x float> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <2 x float> %op1, <2 x float> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + ret <4 x float> %res +} + +define void @concat_v8f32(<4 x float>* %a, <4 x float>* %b, <8 x float>* %c) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: splice z1.s, p0, z1.s, z0.s +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: st1w { z1.s }, p0, [x2] +; CHECK-NEXT: ret + %op1 = load <4 x float>, <4 x float>* %a + %op2 = load <4 x float>, <4 x float>* %b + %res = shufflevector <4 x float> %op1, <4 x float> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x float> %res, <8 x float>* %c + ret void +} + +define void @concat_v16f32(<8 x float>* %a, <8 x float>* %b, <16 x float>* %c) #0 { +; CHECK-LABEL: concat_v16f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: stp q0, q1, [x2, #32] +; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: ret + %op1 = load <8 x float>, <8 x float>* %a + %op2 = load <8 x float>, <8 x float>* %b + %res = shufflevector <8 x float> %op1, <8 x float> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <16 x float> %res, <16 x float>* %c + ret void +} + +; +; f64 +; + +; Streaming-compatible mode uses SVE even for 128-bit vectors. +define <2 x double> @concat_v2f64(<1 x double> %op1, <1 x double> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: splice z0.d, p0, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = shufflevector <1 x double> %op1, <1 x double> %op2, <2 x i32> <i32 0, i32 1> + ret <2 x double> %res +} + +define void @concat_v4f64(<2 x double>* %a, <2 x double>* %b, <4 x double>* %c) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x0] +; CHECK-NEXT: splice z1.d, p0, z1.d, z0.d +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: st1d { z1.d }, p0, [x2] +; CHECK-NEXT: ret + %op1 = load <2 x double>, <2 x double>* %a + %op2 = load <2 x double>, <2 x double>* %b + %res = shufflevector <2 x double> %op1, <2 x double> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x double> %res, <4 x double>* %c + ret void +} + +define void @concat_v8f64(<4 x double>* %a, <4 x double>* %b, <8 x double>* %c) #0 { +; CHECK-LABEL: concat_v8f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: stp q0, q1, [x2, #32] +; CHECK-NEXT: stp q2, q3, [x2] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %res = shufflevector <4 x double> %op1, <4 x double> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x double> %res, <8 x double>* %c + ret void +} + +; +; undef +; + +define void @concat_v32i8_undef(<16 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v32i8_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <16 x i8>, <16 x i8>* %a + %res = shufflevector <16 x i8> %op1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + store <32 x i8> %res, <32 x i8>* %b + ret void +} + +define void @concat_v16i16_undef(<8 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v16i16_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <8 x i16>, <8 x i16>* %a + %res = shufflevector <8 x i16> %op1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <16 x i16> %res, <16 x i16>* %b + ret void +} + +define void @concat_v8i32_undef(<4 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v8i32_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: st1w { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <4 x i32>, <4 x i32>* %a + %res = shufflevector <4 x i32> %op1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i32> %res, <8 x i32>* %b + ret void +} + +define void @concat_v4i64_undef(<2 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4i64_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: st1d { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <2 x i64>, <2 x i64>* %a + %res = shufflevector <2 x i64> %op1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i64> %res, <4 x i64>* %b + ret void +} + +; +; > 2 operands +; + +define void @concat_v32i8_4op(<8 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v32i8_4op: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: st1b { z0.b }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <8 x i8>, <8 x i8>* %a + %shuffle = shufflevector <8 x i8> %op1, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + %res = shufflevector <16 x i8> %shuffle, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> + store <32 x i8> %res, <32 x i8>* %b + ret void +} + +define void @concat_v16i16_4op(<4 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v16i16_4op: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: st1h { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <4 x i16>, <4 x i16>* %a + %shuffle = shufflevector <4 x i16> %op1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %res = shufflevector <8 x i16> %shuffle, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <16 x i16> %res, <16 x i16>* %b + ret void +} + +define void @concat_v8i32_4op(<2 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v8i32_4op: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: st1w { z0.s }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <2 x i32>, <2 x i32>* %a + %shuffle = shufflevector <2 x i32> %op1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %res = shufflevector <4 x i32> %shuffle, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i32> %res, <8 x i32>* %b + ret void +} + +define void @concat_v4i64_4op(<1 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: concat_v4i64_4op: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: st1d { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %op1 = load <1 x i64>, <1 x i64>* %a + %shuffle = shufflevector <1 x i64> %op1, <1 x i64> undef, <2 x i32> <i32 0, i32 1> + %res = shufflevector <2 x i64> %shuffle, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i64> %res, <4 x i64>* %b + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll 
b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll @@ -0,0 +1,322 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; i1 + +define <4 x i1> @extract_subvector_v8i1(<8 x i1> %op) #0 { +; CHECK-LABEL: extract_subvector_v8i1: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.b, z0.b[7] +; CHECK-NEXT: mov z2.b, z0.b[6] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.b, z0.b[5] +; CHECK-NEXT: mov z0.b, z0.b[4] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: strh w11, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ret = call <4 x i1> @llvm.vector.extract.v4i1.v8i1(<8 x i1> %op, i64 4) + ret <4 x i1> %ret +} + +; i8 + +define <4 x i8> @extract_subvector_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: extract_subvector_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.b, z0.b[7] +; CHECK-NEXT: mov z2.b, z0.b[6] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.b, z0.b[5] +; CHECK-NEXT: mov z0.b, z0.b[4] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: strh w9, [sp, #12] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: strh w11, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ret = call <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4) + ret <4 x i8> %ret +} + +define <8 x i8> @extract_subvector_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: extract_subvector_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %ret = call <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8) + ret <8 x i8> %ret +} + +define void @extract_subvector_v32i8(<32 x i8>* %a, <16 x i8>* %b) #0 { +; CHECK-LABEL: extract_subvector_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %ret = call <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16) + store <16 x i8> %ret, <16 x i8>* %b + ret void +} + +; i16 + +define <2 x i16> @extract_subvector_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: extract_subvector_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %ret = call <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2) + ret <2 x i16> %ret +} + +define <4 x i16> @extract_subvector_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: extract_subvector_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %ret = call <4 x i16> 
+  %ret = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4)
+  ret <4 x i16> %ret
+}
+
+define void @extract_subvector_v16i16(<16 x i16>* %a, <8 x i16>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %ret = call <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8)
+  store <8 x i16> %ret, <8 x i16>* %b
+  ret void
+}
+
+; i32
+
+define <1 x i32> @extract_subvector_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z0.s, w8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1)
+  ret <1 x i32> %ret
+}
+
+define <2 x i32> @extract_subvector_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2)
+  ret <2 x i32> %ret
+}
+
+define void @extract_subvector_v8i32(<8 x i32>* %a, <4 x i32>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %ret = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4)
+  store <4 x i32> %ret, <4 x i32>* %b
+  ret void
+}
+
+; i64
+
+define <1 x i64> @extract_subvector_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1)
+  ret <1 x i64> %ret
+}
+
+define void @extract_subvector_v4i64(<4 x i64>* %a, <2 x i64>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %ret = call <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2)
+  store <2 x i64> %ret, <2 x i64>* %b
+  ret void
+}
+
+; f16
+
+define <2 x half> @extract_subvector_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z1.h, z0.h[3]
+; CHECK-NEXT:    mov z0.h, z0.h[2]
+; CHECK-NEXT:    str h1, [sp, #10]
+; CHECK-NEXT:    str h0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %ret = call <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2)
+  ret <2 x half> %ret
+}
+
+define <4 x half> @extract_subvector_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4)
+  ret <4 x half> %ret
+}
+
+define void @extract_subvector_v16f16(<16 x half>* %a, <8 x half>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %ret = call <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8)
+  store <8 x half> %ret, <8 x half>* %b
+  ret void
+}
+
+; f32
+
+define <1 x float> @extract_subvector_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    insr z0.s, s0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1)
+  ret <1 x float> %ret
+}
+
+define <2 x float> @extract_subvector_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2)
+  ret <2 x float> %ret
+}
+
+define void @extract_subvector_v8f32(<8 x float>* %a, <4 x float>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %ret = call <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4)
+  store <4 x float> %ret, <4 x float>* %b
+  ret void
+}
+
+; f64
+
+define <1 x double> @extract_subvector_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1)
+  ret <1 x double> %ret
+}
+
+define void @extract_subvector_v4f64(<4 x double>* %a, <2 x double>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %ret = call <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2)
+  store <2 x double> %ret, <2 x double>* %b
+  ret void
+}
+
+declare <4 x i1> @llvm.vector.extract.v4i1.v8i1(<8 x i1>, i64)
+
+declare <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8>, i64)
+declare <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8>, i64)
+declare <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8>, i64)
+declare <32 x i8> @llvm.vector.extract.v32i8.v64i8(<64 x i8>, i64)
+
+declare <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16>, i64)
+declare <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16>, i64)
+declare <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16>, i64)
+declare <16 x i16> @llvm.vector.extract.v16i16.v32i16(<32 x i16>, i64)
+
+declare <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32>, i64)
+declare <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32>, i64)
+declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64)
+declare <8 x i32> @llvm.vector.extract.v8i32.v16i32(<16 x i32>, i64)
+
+declare <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64>, i64)
+declare <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64>, i64)
+declare <4 x i64> @llvm.vector.extract.v4i64.v8i64(<8 x i64>, i64)
+
+declare <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half>, i64)
+declare <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half>, i64)
+declare <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half>, i64)
+declare <16 x half> @llvm.vector.extract.v16f16.v32f16(<32 x half>, i64)
+
+declare <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float>, i64)
+declare <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float>, i64)
+declare <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float>, i64)
+declare <8 x float> @llvm.vector.extract.v8f32.v16f32(<16 x float>, i64)
+
+declare <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double>, i64)
+declare <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double>, i64)
+declare <4 x double> @llvm.vector.extract.v4f64.v8f64(<8 x double>, i64)
+
+attributes #0 = { "target-features"="+sve" }
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; extractelement
+;
+
+define half @extractelement_v2f16(<2 x half> %op1) #0 {
+; CHECK-LABEL: extractelement_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.h, z0.h[1]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <2 x half> %op1, i64 1
+  ret half %r
+}
+
+define half @extractelement_v4f16(<4 x half> %op1) #0 {
+; CHECK-LABEL: extractelement_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.h, z0.h[3]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <4 x half> %op1, i64 3
+  ret half %r
+}
+
+define half @extractelement_v8f16(<8 x half> %op1) #0 {
+; CHECK-LABEL: extractelement_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <8 x half> %op1, i64 7
+  ret half %r
+}
+
+define half @extractelement_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: extractelement_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %op1 = load <16 x half>, <16 x half>* %a
+  %r = extractelement <16 x half> %op1, i64 15
+  ret half %r
+}
+
+define float @extractelement_v2f32(<2 x float> %op1) #0 {
+; CHECK-LABEL: extractelement_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <2 x float> %op1, i64 1
+  ret float %r
+}
+
+define float @extractelement_v4f32(<4 x float> %op1) #0 {
+; CHECK-LABEL: extractelement_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <4 x float> %op1, i64 3
+  ret float %r
+}
+
+define float @extractelement_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: extractelement_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %op1 = load <8 x float>, <8 x float>* %a
+  %r = extractelement <8 x float> %op1, i64 7
+  ret float %r
+}
+
+define double @extractelement_v1f64(<1 x double> %op1) #0 {
+; CHECK-LABEL: extractelement_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <1 x double> %op1, i64 0
+  ret double %r
+}
+
+define double @extractelement_v2f64(<2 x double> %op1) #0 {
+; CHECK-LABEL: extractelement_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <2 x double> %op1, i64 1
+  ret double %r
+}
+
+define double @extractelement_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: extractelement_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %r = extractelement <4 x double> %op1, i64 3
+  ret double %r
+}
+
+attributes #0 = { "target-features"="+sve" }
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
@@ -6,8 +6,8 @@
 define <4 x i8> @load_v4i8(<4 x i8>* %a) #0 {
 ; CHECK-LABEL: load_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    uunpklo z0.h, z0.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %load = load <4 x i8>, <4 x i8>* %a
@@ -44,12 +44,14 @@
 define <2 x i16> @load_v2i16(<2 x i16>* %a) #0 {
 ; CHECK-LABEL: load_v2i16:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldrh w8, [x0, #2]
-; CHECK-NEXT:    ldrh w9, [x0]
-; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    fmov s1, w9
-; CHECK-NEXT:    zip1 z0.s, z1.s, z0.s
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    str w8, [sp, #12]
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    str w8, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %load = load <2 x i16>, <2 x i16>* %a
   ret <2 x i16> %load
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Currently there is no custom lowering for vector shuffles operating on types
+; bigger than NEON. However, having no support opens us up to a code generator
+; hang when expanding BUILD_VECTOR. Here we just validate that the problematic
+; case successfully exits code generation.
+define void @hang_when_merging_stores_after_legalisation(<8 x i32>* %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: hang_when_merging_stores_after_legalisation:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    stp w8, w8, [sp, #8]
+; CHECK-NEXT:    stp w8, w8, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <8 x i32> zeroinitializer
+  %interleaved.vec = shufflevector <8 x i32> %splat, <8 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  store <8 x i32> %interleaved.vec, <8 x i32>* %a, align 4
+  ret void
+}
+
+; Ensure we don't crash when trying to lower a shuffle via an extract
+define void @crash_when_lowering_extract_shuffle(<32 x i32>* %dst, i1 %cond) #0 {
+; CHECK-LABEL: crash_when_lowering_extract_shuffle:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
+  br i1 %cond, label %exit, label %vector.body
+
+vector.body:
+  %1 = load <32 x i32>, <32 x i32>* %dst, align 16
+  %predphi = select <32 x i1> %broadcast.splat, <32 x i32> zeroinitializer, <32 x i32> %1
+  store <32 x i32> %predphi, <32 x i32>* %dst, align 16
+  br label %exit
+
+exit:
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
@@ -65,7 +65,8 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI5_0
 ; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI5_0]
-; CHECK-NEXT:    str s0, [x0]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
   store <2 x half> zeroinitializer, <2 x half>* %a
   ret void