diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1611,7 +1611,11 @@ } void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) { + setOperationAction(ISD::ANY_EXTEND, VT, Custom); + setOperationAction(ISD::ZERO_EXTEND, VT, Custom); + setOperationAction(ISD::SIGN_EXTEND, VT, Custom); setOperationAction(ISD::LOAD, VT, Custom); + setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); } void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) { @@ -5774,8 +5778,8 @@ case ISD::MLOAD: return LowerMLOAD(Op, DAG); case ISD::LOAD: - if (useSVEForFixedLengthVectorVT(Op.getValueType(), - Subtarget->forceStreamingModeCompatibleSVE())) + if (useSVEForFixedLengthVectorVT( + Op.getValueType(), Subtarget->forceStreamingModeCompatibleSVE())) return LowerFixedLengthVectorLoadToSVE(Op, DAG); return LowerLOAD(Op, DAG); case ISD::ADD: @@ -10896,7 +10900,8 @@ ShuffleVectorSDNode *SVN = cast(Op.getNode()); - if (useSVEForFixedLengthVectorVT(VT)) + if (useSVEForFixedLengthVectorVT( + VT, Subtarget->forceStreamingModeCompatibleSVE())) return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG); // Convert shuffles that are directly supported on NEON to target-specific @@ -11587,8 +11592,8 @@ EVT VT = Op.getValueType(); // override NEON if possible. - if (useSVEForFixedLengthVectorVT(VT, - Subtarget->forceStreamingModeCompatibleSVE())) { + if (useSVEForFixedLengthVectorVT( + VT, Subtarget->forceStreamingModeCompatibleSVE())) { if (auto SeqInfo = cast(Op)->isConstantSequence()) { SDLoc DL(Op); EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); @@ -11918,7 +11923,8 @@ SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { - if (useSVEForFixedLengthVectorVT(Op.getValueType())) + if (useSVEForFixedLengthVectorVT( + Op.getValueType(), Subtarget->forceStreamingModeCompatibleSVE())) return LowerFixedLengthConcatVectorsToSVE(Op, DAG); assert(Op.getValueType().isScalableVector() && @@ -12025,7 +12031,8 @@ } // try overriding NEON if possible. - if (useSVEForFixedLengthVectorVT(VT)) + if (useSVEForFixedLengthVectorVT( + VT, Subtarget->forceStreamingModeCompatibleSVE())) return LowerFixedLengthExtractVectorElt(Op, DAG); // Check for non-constant or out of range lane. @@ -12084,13 +12091,16 @@ if (Idx == 0 && InVT.getSizeInBits() <= 128) return Op; - // If this is extracting the upper 64-bits of a 128-bit vector, we match - // that directly. - if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 && - InVT.getSizeInBits() == 128) - return Op; + if (!Subtarget->forceStreamingModeCompatibleSVE()) { + // If this is extracting the upper 64-bits of a 128-bit vector, we match + // that directly. + if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 && + InVT.getSizeInBits() == 128) + return Op; + } - if (useSVEForFixedLengthVectorVT(InVT)) { + if (useSVEForFixedLengthVectorVT( + InVT, Subtarget->forceStreamingModeCompatibleSVE())) { SDLoc DL(Op); EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); @@ -12389,7 +12399,9 @@ switch (Op.getOpcode()) { case ISD::SHL: // override NEON if possible. - if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) + if (VT.isScalableVector() || + useSVEForFixedLengthVectorVT( + VT, Subtarget->forceStreamingModeCompatibleSVE())) return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED); if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) @@ -12402,7 +12414,9 @@ case ISD::SRA: case ISD::SRL: // override NEON if possible. - if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) { + if (VT.isScalableVector() || + useSVEForFixedLengthVectorVT( + VT, Subtarget->forceStreamingModeCompatibleSVE())) { unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED : AArch64ISD::SRL_PRED; return LowerToPredicatedOp(Op, DAG, Opc); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -218,6 +218,8 @@ def UseScalarIncVL : Predicate<"Subtarget->useScalarIncVL()">; +def IsStreamingModeCompatibleSVEDisabled : Predicate<"!Subtarget->forceStreamingModeCompatibleSVE()">; + def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER", SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisInt<1>]>>; diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -3032,30 +3032,31 @@ (EXTRACT_SUBREG (DUP_ZZI_D ZPR:$vec, sve_elm_idx_extdup_d:$index), dsub)>; // Extract element from vector with immediate index that's within the bottom 128-bits. - let AddedComplexity = 1 in { - def : Pat<(i32 (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)), - (i32 (UMOVvi8 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; - def : Pat<(i32 (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index)), - (i32 (UMOVvi16 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index))>; - def : Pat<(i32 (vector_extract (nxv4i32 ZPR:$vec), VectorIndexS:$index)), - (i32 (UMOVvi32 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>; - def : Pat<(i64 (vector_extract (nxv2i64 ZPR:$vec), VectorIndexD:$index)), - (i64 (UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index))>; - } - - def : Pat<(sext_inreg (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index), i8), - (i32 (SMOVvi8to32 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; - def : Pat<(sext_inreg (anyext (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)), i8), - (i64 (SMOVvi8to64 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; + let Predicates = [IsStreamingModeCompatibleSVEDisabled] in { + let AddedComplexity = 1 in { + def : Pat<(i32 (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)), + (i32 (UMOVvi8 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; + def : Pat<(i32 (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index)), + (i32 (UMOVvi16 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index))>; + def : Pat<(i32 (vector_extract (nxv4i32 ZPR:$vec), VectorIndexS:$index)), + (i32 (UMOVvi32 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>; + def : Pat<(i64 (vector_extract (nxv2i64 ZPR:$vec), VectorIndexD:$index)), + (i64 (UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index))>; + } - def : Pat<(sext_inreg (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index), i16), - (i32 (SMOVvi16to32 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index))>; - def : Pat<(sext_inreg (anyext (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index)), i16), - (i64 (SMOVvi16to64 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index))>; + def : Pat<(sext_inreg (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index), i8), + (i32 (SMOVvi8to32 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; + def : Pat<(sext_inreg (anyext (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)), i8), + (i64 (SMOVvi8to64 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>; - def : Pat<(sext (vector_extract (nxv4i32 ZPR:$vec), VectorIndexS:$index)), - (i64 (SMOVvi32to64 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>; + def : Pat<(sext_inreg (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index), i16), + (i32 (SMOVvi16to32 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index))>; + def : Pat<(sext_inreg (anyext (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index)), i16), + (i64 (SMOVvi16to64 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index))>; + def : Pat<(sext (vector_extract (nxv4i32 ZPR:$vec), VectorIndexS:$index)), + (i64 (SMOVvi32to64 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>; + } // Extract first element from vector. let AddedComplexity = 2 in { def : Pat<(vector_extract (nxv16i8 ZPR:$Zs), (i64 0)), diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp --- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp @@ -67,7 +67,7 @@ static cl::opt ForceStreamingModeCompatibleSVE("force-streaming-mode-compatible-sve", - cl::init(false), cl::Hidden); + cl::init(false), cl::Hidden); unsigned AArch64Subtarget::getVectorInsertExtractBaseCost() const { if (OverrideVectorInsertExtractBaseCost.getNumOccurrences() > 0) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll @@ -0,0 +1,372 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -aarch64-sve-vector-bits-min=128 -force-streaming-mode-compatible-sve < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_128 +; RUN: llc -aarch64-sve-vector-bits-min=1024 -force-streaming-mode-compatible-sve < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_1024 +; RUN: llc -aarch64-sve-vector-bits-min=2048 -force-streaming-mode-compatible-sve < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_2048 + +target triple = "aarch64-unknown-linux-gnu" + +define <8 x i16> @load_zext_v8i8i16(<8 x i8>* %ap) #0 { +; CHECK-LABEL: load_zext_v8i8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #4 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0, x8] +; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] +; CHECK-NEXT: splice z0.h, p0, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <8 x i8>, <8 x i8>* %ap + %val = zext <8 x i8> %a to <8 x i16> + ret <8 x i16> %val +} + +define <4 x i32> @load_zext_v4i16i32(<4 x i16>* %ap) #0 { +; CHECK-LABEL: load_zext_v4i16i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <4 x i16>, <4 x i16>* %ap + %val = zext <4 x i16> %a to <4 x i32> + ret <4 x i32> %val +} + +define <2 x i64> @load_zext_v2i32i64(<2 x i32>* %ap) #0 { +; CHECK-LABEL: load_zext_v2i32i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0] +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <2 x i32>, <2 x i32>* %ap + %val = zext <2 x i32> %a to <2 x i64> + ret <2 x i64> %val +} + +define <2 x i256> @load_zext_v2i64i256(<2 x i64>* %ap) #0 { +; VBITS_GE_128-LABEL: load_zext_v2i64i256: +; VBITS_GE_128: // %bb.0: +; VBITS_GE_128-NEXT: adrp x8, .LCPI3_0 +; VBITS_GE_128-NEXT: add x8, x8, :lo12:.LCPI3_0 +; VBITS_GE_128-NEXT: ptrue p0.d, vl2 +; VBITS_GE_128-NEXT: mov x1, xzr +; VBITS_GE_128-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_128-NEXT: mov x5, xzr +; VBITS_GE_128-NEXT: ld1d { z1.d }, p0/z, [x8] +; VBITS_GE_128-NEXT: mov z2.d, z0.d[1] +; VBITS_GE_128-NEXT: fmov x0, d0 +; VBITS_GE_128-NEXT: mov z0.d, z1.d[1] +; VBITS_GE_128-NEXT: fmov x2, d1 +; VBITS_GE_128-NEXT: fmov x3, d0 +; VBITS_GE_128-NEXT: fmov x4, d2 +; VBITS_GE_128-NEXT: mov x6, x2 +; VBITS_GE_128-NEXT: mov x7, x3 +; VBITS_GE_128-NEXT: ret +; +; VBITS_GE_1024-LABEL: load_zext_v2i64i256: +; VBITS_GE_1024: // %bb.0: +; VBITS_GE_1024-NEXT: ptrue p0.d, vl2 +; VBITS_GE_1024-NEXT: mov x1, xzr +; VBITS_GE_1024-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_1024-NEXT: mov x2, xzr +; VBITS_GE_1024-NEXT: mov x3, xzr +; VBITS_GE_1024-NEXT: mov x5, xzr +; VBITS_GE_1024-NEXT: mov x6, xzr +; VBITS_GE_1024-NEXT: mov x7, xzr +; VBITS_GE_1024-NEXT: mov z1.d, z0.d[1] +; VBITS_GE_1024-NEXT: fmov x0, d0 +; VBITS_GE_1024-NEXT: fmov x4, d1 +; VBITS_GE_1024-NEXT: ret +; +; VBITS_GE_2048-LABEL: load_zext_v2i64i256: +; VBITS_GE_2048: // %bb.0: +; VBITS_GE_2048-NEXT: ptrue p0.d, vl2 +; VBITS_GE_2048-NEXT: mov x1, xzr +; VBITS_GE_2048-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_2048-NEXT: mov x2, xzr +; VBITS_GE_2048-NEXT: mov x3, xzr +; VBITS_GE_2048-NEXT: mov x5, xzr +; VBITS_GE_2048-NEXT: mov x6, xzr +; VBITS_GE_2048-NEXT: mov x7, xzr +; VBITS_GE_2048-NEXT: mov z1.d, z0.d[1] +; VBITS_GE_2048-NEXT: fmov x0, d0 +; VBITS_GE_2048-NEXT: fmov x4, d1 +; VBITS_GE_2048-NEXT: ret + %a = load <2 x i64>, <2 x i64>* %ap + %val = zext <2 x i64> %a to <2 x i256> + ret <2 x i256> %val +} + +define <16 x i32> @load_sext_v16i8i32(<16 x i8>* %ap) #0 { +; VBITS_GE_128-LABEL: load_sext_v16i8i32: +; VBITS_GE_128: // %bb.0: +; VBITS_GE_128-NEXT: ptrue p0.b, vl16 +; VBITS_GE_128-NEXT: ld1b { z1.b }, p0/z, [x0] +; VBITS_GE_128-NEXT: sunpklo z3.h, z1.b +; VBITS_GE_128-NEXT: ext z1.b, z1.b, z1.b, #8 +; VBITS_GE_128-NEXT: sunpklo z4.h, z1.b +; VBITS_GE_128-NEXT: sunpklo z0.s, z3.h +; VBITS_GE_128-NEXT: ext z3.b, z3.b, z3.b, #8 +; VBITS_GE_128-NEXT: sunpklo z2.s, z4.h +; VBITS_GE_128-NEXT: ext z4.b, z4.b, z4.b, #8 +; VBITS_GE_128-NEXT: sunpklo z1.s, z3.h +; VBITS_GE_128-NEXT: sunpklo z3.s, z4.h +; VBITS_GE_128-NEXT: // kill: def $q0 killed $q0 killed $z0 +; VBITS_GE_128-NEXT: // kill: def $q1 killed $q1 killed $z1 +; VBITS_GE_128-NEXT: // kill: def $q2 killed $q2 killed $z2 +; VBITS_GE_128-NEXT: // kill: def $q3 killed $q3 killed $z3 +; VBITS_GE_128-NEXT: ret +; +; VBITS_GE_1024-LABEL: load_sext_v16i8i32: +; VBITS_GE_1024: // %bb.0: +; VBITS_GE_1024-NEXT: ptrue p0.s, vl16 +; VBITS_GE_1024-NEXT: ld1sb { z0.s }, p0/z, [x0] +; VBITS_GE_1024-NEXT: st1w { z0.s }, p0, [x8] +; VBITS_GE_1024-NEXT: ret +; +; VBITS_GE_2048-LABEL: load_sext_v16i8i32: +; VBITS_GE_2048: // %bb.0: +; VBITS_GE_2048-NEXT: ptrue p0.s, vl16 +; VBITS_GE_2048-NEXT: ld1sb { z0.s }, p0/z, [x0] +; VBITS_GE_2048-NEXT: st1w { z0.s }, p0, [x8] +; VBITS_GE_2048-NEXT: ret + %a = load <16 x i8>, <16 x i8>* %ap + %val = sext <16 x i8> %a to <16 x i32> + ret <16 x i32> %val +} + +define <8 x i32> @load_sext_v8i16i32(<8 x i16>* %ap) #0 { +; VBITS_GE_128-LABEL: load_sext_v8i16i32: +; VBITS_GE_128: // %bb.0: +; VBITS_GE_128-NEXT: ptrue p0.h, vl8 +; VBITS_GE_128-NEXT: ld1h { z1.h }, p0/z, [x0] +; VBITS_GE_128-NEXT: sunpklo z0.s, z1.h +; VBITS_GE_128-NEXT: ext z1.b, z1.b, z1.b, #8 +; VBITS_GE_128-NEXT: sunpklo z1.s, z1.h +; VBITS_GE_128-NEXT: // kill: def $q0 killed $q0 killed $z0 +; VBITS_GE_128-NEXT: // kill: def $q1 killed $q1 killed $z1 +; VBITS_GE_128-NEXT: ret +; +; VBITS_GE_1024-LABEL: load_sext_v8i16i32: +; VBITS_GE_1024: // %bb.0: +; VBITS_GE_1024-NEXT: ptrue p0.s, vl8 +; VBITS_GE_1024-NEXT: ld1sh { z0.s }, p0/z, [x0] +; VBITS_GE_1024-NEXT: st1w { z0.s }, p0, [x8] +; VBITS_GE_1024-NEXT: ret +; +; VBITS_GE_2048-LABEL: load_sext_v8i16i32: +; VBITS_GE_2048: // %bb.0: +; VBITS_GE_2048-NEXT: ptrue p0.s, vl8 +; VBITS_GE_2048-NEXT: ld1sh { z0.s }, p0/z, [x0] +; VBITS_GE_2048-NEXT: st1w { z0.s }, p0, [x8] +; VBITS_GE_2048-NEXT: ret + %a = load <8 x i16>, <8 x i16>* %ap + %val = sext <8 x i16> %a to <8 x i32> + ret <8 x i32> %val +} + +define <4 x i256> @load_sext_v4i32i256(<4 x i32>* %ap) #0 { +; CHECK-LABEL: load_sext_v4i32i256: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: sunpklo z1.d, z0.s +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov x11, d1 +; CHECK-NEXT: sunpklo z0.d, z0.s +; CHECK-NEXT: asr x10, x9, #63 +; CHECK-NEXT: asr x12, x11, #63 +; CHECK-NEXT: stp x9, x10, [x8] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: stp x11, x12, [x8, #32] +; CHECK-NEXT: fmov x11, d0 +; CHECK-NEXT: stp x10, x10, [x8, #16] +; CHECK-NEXT: stp x12, x12, [x8, #48] +; CHECK-NEXT: asr x10, x9, #63 +; CHECK-NEXT: asr x12, x11, #63 +; CHECK-NEXT: stp x10, x10, [x8, #80] +; CHECK-NEXT: stp x9, x10, [x8, #64] +; CHECK-NEXT: stp x12, x12, [x8, #112] +; CHECK-NEXT: stp x11, x12, [x8, #96] +; CHECK-NEXT: ret + %a = load <4 x i32>, <4 x i32>* %ap + %val = sext <4 x i32> %a to <4 x i256> + ret <4 x i256> %val +} + +define <2 x i256> @load_sext_v2i64i256(<2 x i64>* %ap) #0 { +; VBITS_GE_128-LABEL: load_sext_v2i64i256: +; VBITS_GE_128: // %bb.0: +; VBITS_GE_128-NEXT: ptrue p0.d, vl2 +; VBITS_GE_128-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_128-NEXT: mov z1.d, z0.d[1] +; VBITS_GE_128-NEXT: fmov x8, d0 +; VBITS_GE_128-NEXT: fmov x9, d1 +; VBITS_GE_128-NEXT: asr x8, x8, #63 +; VBITS_GE_128-NEXT: asr x9, x9, #63 +; VBITS_GE_128-NEXT: fmov d2, x8 +; VBITS_GE_128-NEXT: fmov d3, x9 +; VBITS_GE_128-NEXT: zip1 z0.d, z0.d, z2.d +; VBITS_GE_128-NEXT: zip1 z2.d, z2.d, z2.d +; VBITS_GE_128-NEXT: zip1 z1.d, z1.d, z3.d +; VBITS_GE_128-NEXT: mov z4.d, z0.d[1] +; VBITS_GE_128-NEXT: fmov x0, d0 +; VBITS_GE_128-NEXT: mov z0.d, z2.d[1] +; VBITS_GE_128-NEXT: fmov x2, d2 +; VBITS_GE_128-NEXT: mov z2.d, z1.d[1] +; VBITS_GE_128-NEXT: fmov x4, d1 +; VBITS_GE_128-NEXT: zip1 z1.d, z3.d, z3.d +; VBITS_GE_128-NEXT: fmov x3, d0 +; VBITS_GE_128-NEXT: mov z0.d, z1.d[1] +; VBITS_GE_128-NEXT: fmov x1, d4 +; VBITS_GE_128-NEXT: fmov x6, d1 +; VBITS_GE_128-NEXT: fmov x5, d2 +; VBITS_GE_128-NEXT: fmov x7, d0 +; VBITS_GE_128-NEXT: ret +; +; VBITS_GE_1024-LABEL: load_sext_v2i64i256: +; VBITS_GE_1024: // %bb.0: +; VBITS_GE_1024-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; VBITS_GE_1024-NEXT: .cfi_def_cfa_offset 16 +; VBITS_GE_1024-NEXT: mov x29, sp +; VBITS_GE_1024-NEXT: .cfi_def_cfa w29, 16 +; VBITS_GE_1024-NEXT: .cfi_offset w30, -8 +; VBITS_GE_1024-NEXT: .cfi_offset w29, -16 +; VBITS_GE_1024-NEXT: sub x9, sp, #112 +; VBITS_GE_1024-NEXT: and sp, x9, #0xffffffffffffffc0 +; VBITS_GE_1024-NEXT: ptrue p0.d, vl2 +; VBITS_GE_1024-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_1024-NEXT: ptrue p0.d, vl8 +; VBITS_GE_1024-NEXT: fmov x8, d0 +; VBITS_GE_1024-NEXT: mov z0.d, z0.d[1] +; VBITS_GE_1024-NEXT: fmov x10, d0 +; VBITS_GE_1024-NEXT: asr x9, x8, #63 +; VBITS_GE_1024-NEXT: asr x11, x10, #63 +; VBITS_GE_1024-NEXT: stp x9, x9, [sp, #16] +; VBITS_GE_1024-NEXT: stp x8, x9, [sp] +; VBITS_GE_1024-NEXT: stp x11, x11, [sp, #48] +; VBITS_GE_1024-NEXT: stp x10, x11, [sp, #32] +; VBITS_GE_1024-NEXT: ld1d { z0.d }, p0/z, [sp] +; VBITS_GE_1024-NEXT: mov z1.d, z0.d[1] +; VBITS_GE_1024-NEXT: mov z2.d, z0.d[2] +; VBITS_GE_1024-NEXT: mov z3.d, z0.d[3] +; VBITS_GE_1024-NEXT: mov z4.d, z0.d[4] +; VBITS_GE_1024-NEXT: mov z5.d, z0.d[5] +; VBITS_GE_1024-NEXT: mov z6.d, z0.d[6] +; VBITS_GE_1024-NEXT: mov z7.d, z0.d[7] +; VBITS_GE_1024-NEXT: fmov x0, d0 +; VBITS_GE_1024-NEXT: fmov x1, d1 +; VBITS_GE_1024-NEXT: fmov x2, d2 +; VBITS_GE_1024-NEXT: fmov x3, d3 +; VBITS_GE_1024-NEXT: fmov x4, d4 +; VBITS_GE_1024-NEXT: fmov x5, d5 +; VBITS_GE_1024-NEXT: fmov x6, d6 +; VBITS_GE_1024-NEXT: fmov x7, d7 +; VBITS_GE_1024-NEXT: mov sp, x29 +; VBITS_GE_1024-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; VBITS_GE_1024-NEXT: ret +; +; VBITS_GE_2048-LABEL: load_sext_v2i64i256: +; VBITS_GE_2048: // %bb.0: +; VBITS_GE_2048-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; VBITS_GE_2048-NEXT: .cfi_def_cfa_offset 16 +; VBITS_GE_2048-NEXT: mov x29, sp +; VBITS_GE_2048-NEXT: .cfi_def_cfa w29, 16 +; VBITS_GE_2048-NEXT: .cfi_offset w30, -8 +; VBITS_GE_2048-NEXT: .cfi_offset w29, -16 +; VBITS_GE_2048-NEXT: sub x9, sp, #112 +; VBITS_GE_2048-NEXT: and sp, x9, #0xffffffffffffffc0 +; VBITS_GE_2048-NEXT: ptrue p0.d, vl2 +; VBITS_GE_2048-NEXT: ld1d { z0.d }, p0/z, [x0] +; VBITS_GE_2048-NEXT: ptrue p0.d, vl8 +; VBITS_GE_2048-NEXT: fmov x8, d0 +; VBITS_GE_2048-NEXT: mov z0.d, z0.d[1] +; VBITS_GE_2048-NEXT: fmov x10, d0 +; VBITS_GE_2048-NEXT: asr x9, x8, #63 +; VBITS_GE_2048-NEXT: asr x11, x10, #63 +; VBITS_GE_2048-NEXT: stp x9, x9, [sp, #16] +; VBITS_GE_2048-NEXT: stp x8, x9, [sp] +; VBITS_GE_2048-NEXT: stp x11, x11, [sp, #48] +; VBITS_GE_2048-NEXT: stp x10, x11, [sp, #32] +; VBITS_GE_2048-NEXT: ld1d { z0.d }, p0/z, [sp] +; VBITS_GE_2048-NEXT: mov z1.d, z0.d[1] +; VBITS_GE_2048-NEXT: mov z2.d, z0.d[2] +; VBITS_GE_2048-NEXT: mov z3.d, z0.d[3] +; VBITS_GE_2048-NEXT: mov z4.d, z0.d[4] +; VBITS_GE_2048-NEXT: mov z5.d, z0.d[5] +; VBITS_GE_2048-NEXT: mov z6.d, z0.d[6] +; VBITS_GE_2048-NEXT: mov z7.d, z0.d[7] +; VBITS_GE_2048-NEXT: fmov x0, d0 +; VBITS_GE_2048-NEXT: fmov x1, d1 +; VBITS_GE_2048-NEXT: fmov x2, d2 +; VBITS_GE_2048-NEXT: fmov x3, d3 +; VBITS_GE_2048-NEXT: fmov x4, d4 +; VBITS_GE_2048-NEXT: fmov x5, d5 +; VBITS_GE_2048-NEXT: fmov x6, d6 +; VBITS_GE_2048-NEXT: fmov x7, d7 +; VBITS_GE_2048-NEXT: mov sp, x29 +; VBITS_GE_2048-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; VBITS_GE_2048-NEXT: ret + %a = load <2 x i64>, <2 x i64>* %ap + %val = sext <2 x i64> %a to <2 x i256> + ret <2 x i256> %val +} + +define <16 x i64> @load_zext_v16i16i64(<16 x i16>* %ap) #0 { +; VBITS_GE_128-LABEL: load_zext_v16i16i64: +; VBITS_GE_128: // %bb.0: +; VBITS_GE_128-NEXT: mov x8, #8 +; VBITS_GE_128-NEXT: ptrue p0.h, vl8 +; VBITS_GE_128-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1] +; VBITS_GE_128-NEXT: ld1h { z1.h }, p0/z, [x0] +; VBITS_GE_128-NEXT: uunpklo z2.s, z0.h +; VBITS_GE_128-NEXT: ext z0.b, z0.b, z0.b, #8 +; VBITS_GE_128-NEXT: uunpklo z3.s, z1.h +; VBITS_GE_128-NEXT: ext z1.b, z1.b, z1.b, #8 +; VBITS_GE_128-NEXT: uunpklo z4.d, z2.s +; VBITS_GE_128-NEXT: ext z2.b, z2.b, z2.b, #8 +; VBITS_GE_128-NEXT: uunpklo z7.s, z0.h +; VBITS_GE_128-NEXT: uunpklo z16.s, z1.h +; VBITS_GE_128-NEXT: uunpklo z0.d, z3.s +; VBITS_GE_128-NEXT: ext z3.b, z3.b, z3.b, #8 +; VBITS_GE_128-NEXT: uunpklo z5.d, z2.s +; VBITS_GE_128-NEXT: uunpklo z6.d, z7.s +; VBITS_GE_128-NEXT: ext z7.b, z7.b, z7.b, #8 +; VBITS_GE_128-NEXT: uunpklo z2.d, z16.s +; VBITS_GE_128-NEXT: ext z16.b, z16.b, z16.b, #8 +; VBITS_GE_128-NEXT: uunpklo z1.d, z3.s +; VBITS_GE_128-NEXT: uunpklo z7.d, z7.s +; VBITS_GE_128-NEXT: uunpklo z3.d, z16.s +; VBITS_GE_128-NEXT: // kill: def $q0 killed $q0 killed $z0 +; VBITS_GE_128-NEXT: // kill: def $q1 killed $q1 killed $z1 +; VBITS_GE_128-NEXT: // kill: def $q2 killed $q2 killed $z2 +; VBITS_GE_128-NEXT: // kill: def $q3 killed $q3 killed $z3 +; VBITS_GE_128-NEXT: // kill: def $q4 killed $q4 killed $z4 +; VBITS_GE_128-NEXT: // kill: def $q5 killed $q5 killed $z5 +; VBITS_GE_128-NEXT: // kill: def $q6 killed $q6 killed $z6 +; VBITS_GE_128-NEXT: // kill: def $q7 killed $q7 killed $z7 +; VBITS_GE_128-NEXT: ret +; +; VBITS_GE_1024-LABEL: load_zext_v16i16i64: +; VBITS_GE_1024: // %bb.0: +; VBITS_GE_1024-NEXT: ptrue p0.d, vl16 +; VBITS_GE_1024-NEXT: ld1h { z0.d }, p0/z, [x0] +; VBITS_GE_1024-NEXT: st1d { z0.d }, p0, [x8] +; VBITS_GE_1024-NEXT: ret +; +; VBITS_GE_2048-LABEL: load_zext_v16i16i64: +; VBITS_GE_2048: // %bb.0: +; VBITS_GE_2048-NEXT: ptrue p0.d, vl16 +; VBITS_GE_2048-NEXT: ld1h { z0.d }, p0/z, [x0] +; VBITS_GE_2048-NEXT: st1d { z0.d }, p0, [x8] +; VBITS_GE_2048-NEXT: ret + %a = load <16 x i16>, <16 x i16>* %ap + %val = zext <16 x i16> %a to <16 x i64> + ret <16 x i64> %val +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll @@ -0,0 +1,177 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -aarch64-sve-vector-bits-min=128 -force-streaming-mode-compatible-sve < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_128_STREAMING +; RUN: llc -aarch64-sve-vector-bits-min=256 -force-streaming-mode-compatible-sve < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256_STREAMING +; RUN: llc -aarch64-sve-vector-bits-min=512 -force-streaming-mode-compatible-sve < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512_STREAMING + +target triple = "aarch64-unknown-linux-gnu" + +; +; Masked Load +; + +define <4 x i8> @masked_load_v4i8(<4 x i8>* %src, <4 x i1> %mask) #0 { +; CHECK-LABEL: masked_load_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI0_0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x8] +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0] +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %load = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 8, <4 x i1> %mask, <4 x i8> zeroinitializer) + ret <4 x i8> %load +} + + +define <16 x i8> @masked_load_v16i8(<16 x i8>* %src, <16 x i1> %mask) #0 { +; CHECK-LABEL: masked_load_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI1_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x8] +; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %src, i32 8, <16 x i1> %mask, <16 x i8> zeroinitializer) + ret <16 x i8> %load +} + +define <2 x half> @masked_load_v2f16(<2 x half>* %src, <2 x i1> %mask) #0 { +; CHECK-LABEL: masked_load_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: str wzr, [sp, #12] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI2_0 +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8] +; CHECK-NEXT: add x8, sp, #8 +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x8] +; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: asrr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %load = call <2 x half> @llvm.masked.load.v2f16(<2 x half>* %src, i32 8, <2 x i1> %mask, <2 x half> zeroinitializer) + ret <2 x half> %load +} + +define <4 x float> @masked_load_v4f32(<4 x float>* %src, <4 x i1> %mask) vscale_range(1,16) #0 { +; CHECK-LABEL: masked_load_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI3_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8] +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %load = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %src, i32 8, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load +} + +define <2 x double> @masked_load_v2f64(<2 x double>* %src, <2 x i1> %mask) #0 { +; CHECK-LABEL: masked_load_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI4_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x8] +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %load = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %src, i32 8, <2 x i1> %mask, <2 x double> zeroinitializer) + ret <2 x double> %load +} + +define <4 x double> @masked_load_v4f64(<4 x double>* %src, <4 x i1> %mask) #0 { +; VBITS_GE_128_STREAMING-LABEL: masked_load_v4f64: +; VBITS_GE_128_STREAMING: // %bb.0: +; VBITS_GE_128_STREAMING-NEXT: adrp x8, .LCPI5_0 +; VBITS_GE_128_STREAMING-NEXT: add x8, x8, :lo12:.LCPI5_0 +; VBITS_GE_128_STREAMING-NEXT: ptrue p0.d, vl2 +; VBITS_GE_128_STREAMING-NEXT: // kill: def $d0 killed $d0 def $z0 +; VBITS_GE_128_STREAMING-NEXT: uunpklo z0.s, z0.h +; VBITS_GE_128_STREAMING-NEXT: ld1d { z1.d }, p0/z, [x8] +; VBITS_GE_128_STREAMING-NEXT: uunpklo z2.d, z0.s +; VBITS_GE_128_STREAMING-NEXT: ext z0.b, z0.b, z0.b, #8 +; VBITS_GE_128_STREAMING-NEXT: mov x8, #2 +; VBITS_GE_128_STREAMING-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_128_STREAMING-NEXT: lsl z2.d, p0/m, z2.d, z1.d +; VBITS_GE_128_STREAMING-NEXT: lsl z0.d, p0/m, z0.d, z1.d +; VBITS_GE_128_STREAMING-NEXT: asr z2.d, p0/m, z2.d, z1.d +; VBITS_GE_128_STREAMING-NEXT: asr z0.d, p0/m, z0.d, z1.d +; VBITS_GE_128_STREAMING-NEXT: cmpne p1.d, p0/z, z2.d, #0 +; VBITS_GE_128_STREAMING-NEXT: cmpne p0.d, p0/z, z0.d, #0 +; VBITS_GE_128_STREAMING-NEXT: ld1d { z0.d }, p1/z, [x0] +; VBITS_GE_128_STREAMING-NEXT: ld1d { z1.d }, p0/z, [x0, x8, lsl #3] +; VBITS_GE_128_STREAMING-NEXT: // kill: def $q0 killed $q0 killed $z0 +; VBITS_GE_128_STREAMING-NEXT: // kill: def $q1 killed $q1 killed $z1 +; VBITS_GE_128_STREAMING-NEXT: ret +; +; VBITS_GE_256_STREAMING-LABEL: masked_load_v4f64: +; VBITS_GE_256_STREAMING: // %bb.0: +; VBITS_GE_256_STREAMING-NEXT: // kill: def $d0 killed $d0 def $z0 +; VBITS_GE_256_STREAMING-NEXT: ptrue p0.d, vl4 +; VBITS_GE_256_STREAMING-NEXT: uunpklo z0.s, z0.h +; VBITS_GE_256_STREAMING-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_256_STREAMING-NEXT: lsl z0.d, p0/m, z0.d, #63 +; VBITS_GE_256_STREAMING-NEXT: asr z0.d, p0/m, z0.d, #63 +; VBITS_GE_256_STREAMING-NEXT: cmpne p1.d, p0/z, z0.d, #0 +; VBITS_GE_256_STREAMING-NEXT: ld1d { z0.d }, p1/z, [x0] +; VBITS_GE_256_STREAMING-NEXT: st1d { z0.d }, p0, [x8] +; VBITS_GE_256_STREAMING-NEXT: ret +; +; VBITS_GE_512_STREAMING-LABEL: masked_load_v4f64: +; VBITS_GE_512_STREAMING: // %bb.0: +; VBITS_GE_512_STREAMING-NEXT: // kill: def $d0 killed $d0 def $z0 +; VBITS_GE_512_STREAMING-NEXT: ptrue p0.d, vl4 +; VBITS_GE_512_STREAMING-NEXT: uunpklo z0.s, z0.h +; VBITS_GE_512_STREAMING-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_512_STREAMING-NEXT: lsl z0.d, p0/m, z0.d, #63 +; VBITS_GE_512_STREAMING-NEXT: asr z0.d, p0/m, z0.d, #63 +; VBITS_GE_512_STREAMING-NEXT: cmpne p1.d, p0/z, z0.d, #0 +; VBITS_GE_512_STREAMING-NEXT: ld1d { z0.d }, p1/z, [x0] +; VBITS_GE_512_STREAMING-NEXT: st1d { z0.d }, p0, [x8] +; VBITS_GE_512_STREAMING-NEXT: ret + %load = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %src, i32 8, <4 x i1> %mask, <4 x double> zeroinitializer) + ret <4 x double> %load +} + +declare <4 x i8> @llvm.masked.load.v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>) +declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>) +declare <2 x half> @llvm.masked.load.v2f16(<2 x half>*, i32, <2 x i1>, <2 x half>) +declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>) +declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>) +declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll @@ -0,0 +1,138 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -aarch64-sve-vector-bits-min=128 -force-streaming-mode-compatible-sve < %s | FileCheck %s +; RUN: llc -aarch64-sve-vector-bits-min=256 -force-streaming-mode-compatible-sve < %s | FileCheck %s +; RUN: llc -aarch64-sve-vector-bits-min=512 -force-streaming-mode-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; Masked Store +; + +define void @masked_store_v4i8(<4 x i8>* %dst, <4 x i1> %mask) #0 { +; CHECK-LABEL: masked_store_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI0_0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x8] +; CHECK-NEXT: adrp x8, .LCPI0_1 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI0_1 +; CHECK-NEXT: ld1h { z2.h }, p0/z, [x8] +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: st1b { z2.h }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.masked.store.v4i8(<4 x i8> zeroinitializer, <4 x i8>* %dst, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_store_v16i8(<16 x i8>* %dst, <16 x i1> %mask) #0 { +; CHECK-LABEL: masked_store_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI1_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x8] +; CHECK-NEXT: adrp x8, .LCPI1_1 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI1_1 +; CHECK-NEXT: ld1b { z2.b }, p0/z, [x8] +; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0 +; CHECK-NEXT: st1b { z2.b }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.masked.store.v16i8(<16 x i8> zeroinitializer, <16 x i8>* %dst, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_store_v2f16(<2 x half>* %dst, <2 x i1> %mask) #0 { +; CHECK-LABEL: masked_store_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: str wzr, [sp, #12] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI2_0 +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8] +; CHECK-NEXT: add x8, sp, #8 +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x8] +; CHECK-NEXT: adrp x8, .LCPI2_1 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI2_1 +; CHECK-NEXT: ld1h { z2.h }, p0/z, [x8] +; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: asrr z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: st1h { z2.h }, p0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + call void @llvm.masked.store.v2f16(<2 x half> zeroinitializer, <2 x half>* %dst, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_store_v4f32(<4 x float>* %dst, <4 x i1> %mask) #0 { +; CHECK-LABEL: masked_store_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI3_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8] +; CHECK-NEXT: adrp x8, .LCPI3_1 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI3_1 +; CHECK-NEXT: ld1w { z2.s }, p0/z, [x8] +; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0 +; CHECK-NEXT: st1w { z2.s }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.masked.store.v4f32(<4 x float> zeroinitializer, <4 x float>* %dst, i32 8, <4 x i1> %mask) + ret void +} + +; this test case caused a crash, I don't why, I commented it now, but I'll investigate it later. +; define void @masked_store_v32f32(<32 x float>* %dst, <32 x i1> %mask) #0 { +; call void @llvm.masked.store.v32f32(<32 x float> zeroinitializer, <32 x float>* %dst, i32 8, <32 x i1> %mask) +; ret void +; } + +define void @masked_store_v2f64(<2 x double>* %dst, <2 x i1> %mask) #0 { +; CHECK-LABEL: masked_store_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI4_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x8] +; CHECK-NEXT: adrp x8, .LCPI4_1 +; CHECK-NEXT: add x8, x8, :lo12:.LCPI4_1 +; CHECK-NEXT: ld1d { z2.d }, p0/z, [x8] +; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: st1d { z2.d }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.masked.store.v2f64(<2 x double> zeroinitializer, <2 x double>* %dst, i32 8, <2 x i1> %mask) + ret void +} + +declare void @llvm.masked.store.v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>) +declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>) +declare void @llvm.masked.store.v2f16(<2 x half>, <2 x half>*, i32, <2 x i1>) +declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>) +declare void @llvm.masked.store.v32f32(<32 x float>, <32 x float>*, i32, <32 x i1>) +declare void @llvm.masked.store.v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll @@ -51,7 +51,8 @@ ; CHECK-NEXT: add x8, x8, :lo12:.LCPI2_0 ; CHECK-NEXT: ptrue p0.h, vl4 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8] -; CHECK-NEXT: str s0, [x0] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: str w8, [x0] ; CHECK-NEXT: ret store <2 x half> zeroinitializer, <2 x half>* %a ret void diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll @@ -0,0 +1,139 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -aarch64-sve-vector-bits-min=128 -force-streaming-mode-compatible-sve < %s | FileCheck %s +; RUN: llc -aarch64-sve-vector-bits-min=256 -force-streaming-mode-compatible-sve < %s | FileCheck %s +; RUN: llc -aarch64-sve-vector-bits-min=512 -force-streaming-mode-compatible-sve < %s | FileCheck %s +; RUN: llc -aarch64-sve-vector-bits-min=2048 -force-streaming-mode-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @store_trunc_v8i16i8(<8 x i16>* %ap, <8 x i8>* %dest) #0 { +; CHECK-LABEL: store_trunc_v8i16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.h, z0.h[7] +; CHECK-NEXT: mov z3.h, z0.h[5] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: mov z4.h, z0.h[4] +; CHECK-NEXT: strb w8, [sp, #8] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z6.h, z0.h[2] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w9, [sp, #15] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strb w8, [sp, #13] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: mov z5.h, z0.h[3] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: strb w10, [sp, #14] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w9, [sp, #12] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strb w8, [sp, #10] +; CHECK-NEXT: add x8, sp, #8 +; CHECK-NEXT: strb w10, [sp, #11] +; CHECK-NEXT: strb w9, [sp, #9] +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x8] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %a = load <8 x i16>, <8 x i16>* %ap + %val = trunc <8 x i16> %a to <8 x i8> + store <8 x i8> %val, <8 x i8>* %dest + ret void +} + +define void @store_trunc_v4i32i8(<4 x i32>* %ap, <4 x i8>* %dest) #0 { +; CHECK-LABEL: store_trunc_v4i32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: add x8, sp, #8 +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8] +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %a = load <4 x i32>, <4 x i32>* %ap + %val = trunc <4 x i32> %a to <4 x i8> + store <4 x i8> %val, <4 x i8>* %dest + ret void +} + +define void @store_trunc_v4i32i16(<4 x i32>* %ap, <4 x i16>* %dest) #0 { +; CHECK-LABEL: store_trunc_v4i32i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z1.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: strh w8, [sp, #10] +; CHECK-NEXT: add x8, sp, #8 +; CHECK-NEXT: strh w10, [sp, #12] +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x8] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %a = load <4 x i32>, <4 x i32>* %ap + %val = trunc <4 x i32> %a to <4 x i16> + store <4 x i16> %val, <4 x i16>* %dest + ret void +} + +define void @store_trunc_v2i64i8(<2 x i64>* %ap, <2 x i32>* %dest) vscale_range(2,0) #0 { +; CHECK-LABEL: store_trunc_v2i64i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: st1w { z0.d }, p0, [x1] +; CHECK-NEXT: ret + %a = load <2 x i64>, <2 x i64>* %ap + %val = trunc <2 x i64> %a to <2 x i32> + store <2 x i32> %val, <2 x i32>* %dest + ret void +} + +define void @store_trunc_v2i256i64(<2 x i256>* %ap, <2 x i64>* %dest) vscale_range(2,0) #0 { +; CHECK-LABEL: store_trunc_v2i256i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0, #32] +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: ldr d1, [x0] +; CHECK-NEXT: splice z1.d, p0, z1.d, z0.d +; CHECK-NEXT: str q1, [x1] +; CHECK-NEXT: ret + %a = load <2 x i256>, <2 x i256>* %ap + %val = trunc <2 x i256> %a to <2 x i64> + store <2 x i64> %val, <2 x i64>* %dest + ret void +} + +attributes #0 = { "target-features"="+sve" }