diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1622,6 +1622,7 @@ setOperationAction(ISD::MULHU, VT, Custom); setOperationAction(ISD::ABS, VT, Custom); setOperationAction(ISD::XOR, VT, Custom); + setOperationAction(ISD::CTLZ, VT, Custom); } void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) { @@ -8416,7 +8417,7 @@ assert(!IsParity && "ISD::PARITY of vector types not supported"); - if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) + if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, Subtarget->forceStreamingCompatibleSVE())) return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU); assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || @@ -12204,7 +12205,7 @@ SelectionDAG &DAG) const { assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!"); - if (useSVEForFixedLengthVectorVT(Op.getValueType())) + if (useSVEForFixedLengthVectorVT(Op.getValueType(), Subtarget->forceStreamingCompatibleSVE())) return LowerFixedLengthInsertVectorElt(Op, DAG); // Check for non-constant or out of range lane. @@ -12791,7 +12792,7 @@ if (Op.getValueType().isScalableVector()) return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO); - if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType())) + if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(), Subtarget->forceStreamingCompatibleSVE())) return LowerFixedLengthVectorSetccToSVE(Op, DAG); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); @@ -15525,7 +15526,8 @@ // The combining code currently only works for NEON vectors. In particular, // it does not work for SVE when dealing with vectors wider than 128 bits.
- if (!VT.is64BitVector() && !VT.is128BitVector()) + if ((!VT.is64BitVector() && !VT.is128BitVector()) || + DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE()) return SDValue(); SDValue N0 = N->getOperand(0); @@ -22628,7 +22630,7 @@ EVT InVT = Op.getOperand(0).getValueType(); EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); - assert(useSVEForFixedLengthVectorVT(InVT) && + assert(useSVEForFixedLengthVectorVT(InVT, Subtarget->forceStreamingCompatibleSVE()) && "Only expected to lower fixed length vector operation!"); assert(Op.getValueType() == InVT.changeTypeToInteger() && "Expected integer result of the same bit length as the inputs!"); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -3655,7 +3655,11 @@ if (AArch64::FPR128RegClass.contains(DestReg) && AArch64::FPR128RegClass.contains(SrcReg)) { - if (Subtarget.hasNEON()) { + if (Subtarget.forceStreamingCompatibleSVE()) { + BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ), AArch64::Z0 + (DestReg - AArch64::Q0)) + .addReg(AArch64::Z0 + (SrcReg - AArch64::Q0)) + .addReg(AArch64::Z0 + (SrcReg - AArch64::Q0)); + } else if (Subtarget.hasNEON()) { BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg) .addReg(SrcReg) .addReg(SrcReg, getKillRegState(KillSrc)); diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll @@ -0,0 +1,638 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; CLZ +; + +define <4 x i8> @ctlz_v4i8(<4 x i8> %op) #0 { +; CHECK-LABEL: ctlz_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: adrp x9, .LCPI0_1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: ldr d2, [x9, :lo12:.LCPI0_1] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: sub z0.h, z0.h, z2.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.ctlz.v4i8(<4 x i8> %op) + ret <4 x i8> %res +} + +define <8 x i8> @ctlz_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: ctlz_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %op) + ret <8 x i8> %res +} + +define <16 x i8> @ctlz_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: ctlz_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %op) + ret <16 x i8> %res +} + +define void @ctlz_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: ctlz_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: clz z1.b, p0/m, z1.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call
<32 x i8> @llvm.ctlz.v32i8(<32 x i8> %op) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @ctlz_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: ctlz_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: adrp x9, .LCPI4_1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: ldr d2, [x9, :lo12:.LCPI4_1] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: sub z0.s, z0.s, z2.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @ctlz_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: ctlz_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @ctlz_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: ctlz_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define void @ctlz_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: ctlz_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: clz z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @ctlz_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: ctlz_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @ctlz_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: ctlz_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @ctlz_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: ctlz_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: clz z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @ctlz_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: ctlz_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @ctlz_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: ctlz_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 
def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %op) + ret <2 x i64> %res +} + +define void @ctlz_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: ctlz_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: clz z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; CNT +; + +define <4 x i8> @ctpop_v4i8(<4 x i8> %op) #0 { +; CHECK-LABEL: ctpop_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.ctpop.v4i8(<4 x i8> %op) + ret <4 x i8> %res +} + +define <8 x i8> @ctpop_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: ctpop_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: cnt v0.8b, v0.8b +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %op) + ret <8 x i8> %res +} + +define <16 x i8> @ctpop_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: ctpop_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: cnt v0.16b, v0.16b +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %op) + ret <16 x i8> %res +} + +define void @ctpop_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: ctpop_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: cnt v0.16b, v0.16b +; CHECK-NEXT: cnt v1.16b, v1.16b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %op) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @ctpop_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: ctpop_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI18_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI18_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.ctpop.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @ctpop_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: ctpop_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @ctpop_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: ctpop_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define void @ctpop_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: ctpop_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: cnt z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <16 x 
i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @ctpop_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: ctpop_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @ctpop_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: ctpop_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @ctpop_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: ctpop_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: cnt z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @ctpop_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: ctpop_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: cnt z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @ctpop_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: ctpop_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: cnt z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %op) + ret <2 x i64> %res +} + +define void @ctpop_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: ctpop_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: cnt z0.d, p0/m, z0.d +; CHECK-NEXT: cnt z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; Count trailing zeros +; + +define <4 x i8> @cttz_v4i8(<4 x i8> %op) #0 { +; CHECK-LABEL: cttz_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI28_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI28_0] +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.cttz.v4i8(<4 x i8> %op) + ret <4 x i8> %res +} + +define <8 x i8> @cttz_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: cttz_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.cttz.v8i8(<8 x i8> %op) + ret <8 x i8> %res +} + +define <16 x i8> @cttz_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: cttz_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: 
// kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %op) + ret <16 x i8> %res +} + +define void @cttz_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: cttz_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: rbit z1.b, p0/m, z1.b +; CHECK-NEXT: clz z1.b, p0/m, z1.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %op) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @cttz_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: cttz_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI32_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI32_0] +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.cttz.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @cttz_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: cttz_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.cttz.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @cttz_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: cttz_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define void @cttz_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: cttz_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: rbit z1.h, p0/m, z1.h +; CHECK-NEXT: clz z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @cttz_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: cttz_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @cttz_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: cttz_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @cttz_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: cttz_v8i32: +; CHECK: // 
%bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: rbit z1.s, p0/m, z1.s +; CHECK-NEXT: clz z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @cttz_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: cttz_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @cttz_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: cttz_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %op) + ret <2 x i64> %res +} + +define void @cttz_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: cttz_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: rbit z1.d, p0/m, z1.d +; CHECK-NEXT: clz z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } + +declare <4 x i8> @llvm.ctlz.v4i8(<4 x i8>) +declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>) +declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>) +declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>) +declare <2 x i16> @llvm.ctlz.v2i16(<2 x i16>) +declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>) +declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>) +declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>) +declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>) +declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>) +declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>) +declare <1 x i64> @llvm.ctlz.v1i64(<1 x i64>) +declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>) +declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>) + +declare <4 x i8> @llvm.ctpop.v4i8(<4 x i8>) +declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) +declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) +declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>) +declare <2 x i16> @llvm.ctpop.v2i16(<2 x i16>) +declare <4 x i16> @llvm.ctpop.v4i16(<4 x i16>) +declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) +declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>) +declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) +declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) +declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) +declare <1 x i64> @llvm.ctpop.v1i64(<1 x i64>) +declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) +declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) + +declare <4 x i8> @llvm.cttz.v4i8(<4 x i8>) +declare <8 x i8> @llvm.cttz.v8i8(<8 x i8>) +declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>) +declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>) +declare <2 x i16> @llvm.cttz.v2i16(<2 x i16>) +declare <4 x i16> @llvm.cttz.v4i16(<4 x i16>) +declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>) +declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>) +declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>) +declare <4 x i32> 
@llvm.cttz.v4i32(<4 x i32>) +declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>) +declare <1 x i64> @llvm.cttz.v1i64(<1 x i64>) +declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>) +declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll @@ -0,0 +1,47 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64" + +; +; NOTE: SVE lowering for the BSP pseudoinst is not currently implemented, so we +; don't currently expect the code below to lower to BSL/BIT/BIF. Once +; this is implemented, this test will be fleshed out. +; + +define <8 x i32> @fixed_bitselect_v8i32(<8 x i32>* %pre_cond_ptr, <8 x i32>* %left_ptr, <8 x i32>* %right_ptr) #0 { +; CHECK-LABEL: fixed_bitselect_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: adrp x8, .LCPI0_1 +; CHECK-NEXT: ldp q3, q4, [x1] +; CHECK-NEXT: sub z6.s, z2.s, z1.s +; CHECK-NEXT: sub z2.s, z2.s, z0.s +; CHECK-NEXT: and z3.d, z6.d, z3.d +; CHECK-NEXT: ldp q7, q16, [x2] +; CHECK-NEXT: and z2.d, z2.d, z4.d +; CHECK-NEXT: ldr q5, [x8, :lo12:.LCPI0_1] +; CHECK-NEXT: add z1.s, z1.s, z5.s +; CHECK-NEXT: add z0.s, z0.s, z5.s +; CHECK-NEXT: and z4.d, z0.d, z16.d +; CHECK-NEXT: and z0.d, z1.d, z7.d +; CHECK-NEXT: orr z0.d, z0.d, z3.d +; CHECK-NEXT: orr z1.d, z4.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %pre_cond = load <8 x i32>, <8 x i32>* %pre_cond_ptr + %left = load <8 x i32>, <8 x i32>* %left_ptr + %right = load <8 x i32>, <8 x i32>* %right_ptr + + %neg_cond = sub <8 x i32> zeroinitializer, %pre_cond + %min_cond = add <8 x i32> %pre_cond, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1> + %left_bits_0 = and <8 x i32> %neg_cond, %left + %right_bits_0 = and <8 x i32> %min_cond, %right + %bsl0000 = or <8 x i32> %right_bits_0, %left_bits_0 + ret <8 x i32> %bsl0000 +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll @@ -0,0 +1,414 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; insertelement +; + +; i8 +define <4 x i8> @insertelement_v4i8(<4 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x i8> %op1, i8 5, i64 3 + ret <4 x i8> %r +} + +define <8 x i8> @insertelement_v8i8(<8 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.b, #0, #1 +; CHECK-NEXT:
ptrue p0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.b, w8 +; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z1.b +; CHECK-NEXT: mov z0.b, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <8 x i8> %op1, i8 5, i64 7 + ret <8 x i8> %r +} + +define <16 x i8> @insertelement_v16i8(<16 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #15 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.b, #0, #1 +; CHECK-NEXT: ptrue p0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.b, w8 +; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z1.b +; CHECK-NEXT: mov z0.b, p0/m, w9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <16 x i8> %op1, i8 5, i64 15 + ret <16 x i8> %r +} + +define <32 x i8> @insertelement_v32i8(<32 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #15 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z3.b, #0, #1 +; CHECK-NEXT: ptrue p0.b +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z2.b, w8 +; CHECK-NEXT: cmpeq p0.b, p0/z, z3.b, z2.b +; CHECK-NEXT: mov z1.b, p0/m, w9 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %r = insertelement <32 x i8> %op1, i8 5, i64 31 + ret <32 x i8> %r +} + +; i16 +define <2 x i16> @insertelement_v2i16(<2 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x i16> %op1, i16 5, i64 1 + ret <2 x i16> %r +} + +define <4 x i16> @insertelement_v4i16(<4 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x i16> %op1, i16 5, i64 3 + ret <4 x i16> %r +} + +define <8 x i16> @insertelement_v8i16(<8 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p0/m, w9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <8 x i16> %op1, i16 5, i64 7 + ret <8 x i16> %r +} + +define <16 x i16> @insertelement_v16i16(<16 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z3.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z2.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z3.h, z2.h +; CHECK-NEXT: mov z1.h, p0/m, w9 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %r = insertelement <16 x i16> %op1, i16 5, i64 15 + ret <16 x i16> %r +} + +;i32 +define <2 x i32> 
@insertelement_v2i32(<2 x i32> %op1) #0 { +; CHECK-LABEL: insertelement_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x i32> %op1, i32 5, i64 1 + ret <2 x i32> %r +} + +define <4 x i32> @insertelement_v4i32(<4 x i32> %op1) #0 { +; CHECK-LABEL: insertelement_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p0/m, w9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x i32> %op1, i32 5, i64 3 + ret <4 x i32> %r +} + +define <8 x i32> @insertelement_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: insertelement_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: index z3.s, #0, #1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mov z2.s, w8 +; CHECK-NEXT: mov w8, #5 +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z2.s +; CHECK-NEXT: mov z1.s, p0/m, w8 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %r = insertelement <8 x i32> %op1, i32 5, i64 7 + ret <8 x i32> %r +} + +;i64 +define <1 x i64> @insertelement_v1i64(<1 x i64> %op1) #0 { +; CHECK-LABEL: insertelement_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #5 +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret + %r = insertelement <1 x i64> %op1, i64 5, i64 0 + ret <1 x i64> %r +} + +define <2 x i64> @insertelement_v2i64(<2 x i64> %op1) #0 { +; CHECK-LABEL: insertelement_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.d, x8 +; CHECK-NEXT: cmpeq p0.d, p0/z, z2.d, z1.d +; CHECK-NEXT: mov z0.d, p0/m, x9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x i64> %op1, i64 5, i64 1 + ret <2 x i64> %r +} + +define <4 x i64> @insertelement_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: insertelement_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: index z3.d, #0, #1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov z2.d, x8 +; CHECK-NEXT: mov w8, #5 +; CHECK-NEXT: cmpeq p0.d, p0/z, z3.d, z2.d +; CHECK-NEXT: mov z1.d, p0/m, x8 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %r = insertelement <4 x i64> %op1, i64 5, i64 3 + ret <4 x i64> %r +} + +;f16 +define <2 x half> @insertelement_v2f16(<2 x half> %op1) #0 { +; CHECK-LABEL: insertelement_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: fmov h1, #5.00000000 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: str h0, [sp, #8] +; CHECK-NEXT: str h1, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %r = insertelement <2 x half> %op1, half 5.0, i64 1 + ret <2 x half> %r +} + +define <4 x half> @insertelement_v4f16(<4 x half> %op1) #0 { +; CHECK-LABEL: 
insertelement_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: fmov h1, #5.00000000 +; CHECK-NEXT: index z3.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z2.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z3.h, z2.h +; CHECK-NEXT: mov z0.h, p0/m, h1 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x half> %op1, half 5.0, i64 3 + ret <4 x half> %r +} + +define <8 x half> @insertelement_v8f16(<8 x half> %op1) #0 { +; CHECK-LABEL: insertelement_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: fmov h1, #5.00000000 +; CHECK-NEXT: index z3.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z2.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z3.h, z2.h +; CHECK-NEXT: mov z0.h, p0/m, h1 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <8 x half> %op1, half 5.0, i64 7 + ret <8 x half> %r +} + +define <16 x half> @insertelement_v16f16(<16 x half>* %a) #0 { +; CHECK-LABEL: insertelement_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: fmov h3, #5.00000000 +; CHECK-NEXT: index z4.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: mov z2.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z4.h, z2.h +; CHECK-NEXT: mov z1.h, p0/m, h3 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <16 x half>, <16 x half>* %a + %r = insertelement <16 x half> %op1, half 5.0, i64 15 + ret <16 x half> %r +} + +;f32 +define <2 x float> @insertelement_v2f32(<2 x float> %op1) #0 { +; CHECK-LABEL: insertelement_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: fmov s1, #5.00000000 +; CHECK-NEXT: index z3.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z2.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z2.s +; CHECK-NEXT: mov z0.s, p0/m, s1 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x float> %op1, float 5.0, i64 1 + ret <2 x float> %r +} + +define <4 x float> @insertelement_v4f32(<4 x float> %op1) #0 { +; CHECK-LABEL: insertelement_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: fmov s1, #5.00000000 +; CHECK-NEXT: index z3.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z2.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z2.s +; CHECK-NEXT: mov z0.s, p0/m, s1 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x float> %op1, float 5.0, i64 3 + ret <4 x float> %r +} + +define <8 x float> @insertelement_v8f32(<8 x float>* %a) #0 { +; CHECK-LABEL: insertelement_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: fmov s4, #5.00000000 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mov z3.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z3.s +; CHECK-NEXT: mov z1.s, p0/m, s4 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <8 x float>, <8 x float>* %a + %r = insertelement <8 x float> %op1, float 5.0, i64 7 + ret <8 x float> %r +} + +;f64 +define <1 x double> @insertelement_v1f64(<1 x double> %op1) #0 { +; CHECK-LABEL: insertelement_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, #5.00000000 +; CHECK-NEXT: ret + %r = insertelement <1 x double> %op1, double 5.0, i64 0 + ret <1 x 
double> %r +} + +define <2 x double> @insertelement_v2f64(<2 x double> %op1) #0 { +; CHECK-LABEL: insertelement_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: fmov d1, #5.00000000 +; CHECK-NEXT: index z3.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z2.d, x8 +; CHECK-NEXT: cmpeq p0.d, p0/z, z3.d, z2.d +; CHECK-NEXT: mov z0.d, p0/m, d1 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x double> %op1, double 5.0, i64 1 + ret <2 x double> %r +} + +define <4 x double> @insertelement_v4f64(<4 x double>* %a) #0 { +; CHECK-LABEL: insertelement_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: fmov d3, #5.00000000 +; CHECK-NEXT: index z4.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov z2.d, x8 +; CHECK-NEXT: cmpeq p0.d, p0/z, z4.d, z2.d +; CHECK-NEXT: mov z1.d, p0/m, d3 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %r = insertelement <4 x double> %op1, double 5.0, i64 3 + ret <4 x double> %r +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll @@ -0,0 +1,331 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; i8 +define void @subvector_v4i8(<4 x i8> *%in, <4 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v4i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i8>, <4 x i8>* %in + br label %bb1 + +bb1: + store <4 x i8> %a, <4 x i8>* %out + ret void +} + +define void @subvector_v8i8(<8 x i8> *%in, <8 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v8i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <8 x i8>, <8 x i8>* %in + br label %bb1 + +bb1: + store <8 x i8> %a, <8 x i8>* %out + ret void +} + +define void @subvector_v16i8(<16 x i8> *%in, <16 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v16i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <16 x i8>, <16 x i8>* %in + br label %bb1 + +bb1: + store <16 x i8> %a, <16 x i8>* %out + ret void +} + +define void @subvector_v32i8(<32 x i8> *%in, <32 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v32i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <32 x i8>, <32 x i8>* %in + br label %bb1 + +bb1: + store <32 x i8> %a, <32 x i8>* %out + ret void +} + +; i16 +define void @subvector_v2i16(<2 x i16> *%in, <2 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v2i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w8, [x0, #2] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w8, [sp, #12] +; CHECK-NEXT: ldrh w8, [x0] +; CHECK-NEXT: str w8, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %a = load <2 x i16>, <2 x 
i16>* %in + br label %bb1 + +bb1: + store <2 x i16> %a, <2 x i16>* %out + ret void +} + +define void @subvector_v4i16(<4 x i16> *%in, <4 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v4i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i16>, <4 x i16>* %in + br label %bb1 + +bb1: + store <4 x i16> %a, <4 x i16>* %out + ret void +} + +define void @subvector_v8i16(<8 x i16> *%in, <8 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v8i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <8 x i16>, <8 x i16>* %in + br label %bb1 + +bb1: + store <8 x i16> %a, <8 x i16>* %out + ret void +} + +define void @subvector_v16i16(<16 x i16> *%in, <16 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v16i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <16 x i16>, <16 x i16>* %in + br label %bb1 + +bb1: + store <16 x i16> %a, <16 x i16>* %out + ret void +} + +; i32 +define void @subvector_v2i32(<2 x i32> *%in, <2 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v2i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <2 x i32>, <2 x i32>* %in + br label %bb1 + +bb1: + store <2 x i32> %a, <2 x i32>* %out + ret void +} + +define void @subvector_v4i32(<4 x i32> *%in, <4 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v4i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i32>, <4 x i32>* %in + br label %bb1 + +bb1: + store <4 x i32> %a, <4 x i32>* %out + ret void +} + +define void @subvector_v8i32(<8 x i32> *%in, <8 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v8i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <8 x i32>, <8 x i32>* %in + br label %bb1 + +bb1: + store <8 x i32> %a, <8 x i32>* %out + ret void +} + +; i64 +define void @subvector_v2i64(<2 x i64> *%in, <2 x i64>* %out) #0 { +; CHECK-LABEL: subvector_v2i64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <2 x i64>, <2 x i64>* %in + br label %bb1 + +bb1: + store <2 x i64> %a, <2 x i64>* %out + ret void +} + +define void @subvector_v4i64(<4 x i64> *%in, <4 x i64>* %out) #0 { +; CHECK-LABEL: subvector_v4i64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <4 x i64>, <4 x i64>* %in + br label %bb1 + +bb1: + store <4 x i64> %a, <4 x i64>* %out + ret void +} + +; f16 +define void @subvector_v2f16(<2 x half> *%in, <2 x half>* %out) #0 { +; CHECK-LABEL: subvector_v2f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr w8, [x0] +; CHECK-NEXT: str w8, [x1] +; CHECK-NEXT: ret + %a = load <2 x half>, <2 x half>* %in + br label %bb1 + +bb1: + store <2 x half> %a, <2 x half>* %out + ret void +} + +define void @subvector_v4f16(<4 x half> *%in, <4 x half>* %out) #0 { +; CHECK-LABEL: subvector_v4f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <4 x half>, <4 x half>* %in + br label %bb1 + +bb1: + store <4 x half> %a, <4 x half>* %out + ret void +} + +define void @subvector_v8f16(<8 x half> *%in, <8 x half>* %out) #0 { +; CHECK-LABEL: subvector_v8f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: 
ret + %a = load <8 x half>, <8 x half>* %in + br label %bb1 + +bb1: + store <8 x half> %a, <8 x half>* %out + ret void +} + +define void @subvector_v16f16(<16 x half> *%in, <16 x half>* %out) #0 { +; CHECK-LABEL: subvector_v16f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <16 x half>, <16 x half>* %in + br label %bb1 + +bb1: + store <16 x half> %a, <16 x half>* %out + ret void +} + +; f32 +define void @subvector_v2f32(<2 x float> *%in, <2 x float>* %out) #0 { +; CHECK-LABEL: subvector_v2f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <2 x float>, <2 x float>* %in + br label %bb1 + +bb1: + store <2 x float> %a, <2 x float>* %out + ret void +} + +define void @subvector_v4f32(<4 x float> *%in, <4 x float>* %out) #0 { +; CHECK-LABEL: subvector_v4f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <4 x float>, <4 x float>* %in + br label %bb1 + +bb1: + store <4 x float> %a, <4 x float>* %out + ret void +} + +define void @subvector_v8f32(<8 x float> *%in, <8 x float>* %out) #0 { +; CHECK-LABEL: subvector_v8f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <8 x float>, <8 x float>* %in + br label %bb1 + +bb1: + store <8 x float> %a, <8 x float>* %out + ret void +} + +; f64 +define void @subvector_v2f64(<2 x double> *%in, <2 x double>* %out) #0 { +; CHECK-LABEL: subvector_v2f64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <2 x double>, <2 x double>* %in + br label %bb1 + +bb1: + store <2 x double> %a, <2 x double>* %out + ret void +} + +define void @subvector_v4f64(<4 x double> *%in, <4 x double>* %out) #0 { +; CHECK-LABEL: subvector_v4f64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <4 x double>, <4 x double>* %in + br label %bb1 + +bb1: + store <4 x double> %a, <4 x double>* %out + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll @@ -0,0 +1,356 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; define <4 x i8> @shuffle_ext_byone_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; %ret = shufflevector <4 x i8> %op1, <4 x i8> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6> +; ret <4 x i8> %ret +; } + +define <8 x i8> @shuffle_ext_byone_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.b, z0.b[7] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.b, w8 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <8 x i8> %op1, <8 x i8> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14> + ret <8 x i8> %ret +} + +define <16 x i8> @shuffle_ext_byone_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill:
def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.b, z0.b[15] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.b, w8 +; CHECK-NEXT: orr z0.d, z1.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <16 x i8> %op1, <16 x i8> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30> + ret <16 x i8> %ret +} + +define void @shuffle_ext_byone_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.b, z0.b[15] +; CHECK-NEXT: mov z2.b, z1.b[15] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: insr z1.b, w8 +; CHECK-NEXT: insr z0.b, w9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %ret = shufflevector <32 x i8> %op1, <32 x i8> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62> + store <32 x i8> %ret, <32 x i8>* %a + ret void +} + +; define <2 x i16> @shuffle_ext_byone_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 { +; %ret = shufflevector <2 x i16> %op1, <2 x i16> %op2, <2 x i32> <i32 1, i32 2> +; ret <2 x i16> %ret +; } + +define <4 x i16> @shuffle_ext_byone_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.h, w8 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <4 x i16> %op1, <4 x i16> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6> + ret <4 x i16> %ret +} + +define <8 x i16> @shuffle_ext_byone_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.h, w8 +; CHECK-NEXT: orr z0.d, z1.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <8 x i16> %op1, <8 x i16> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14> + ret <8 x i16> %ret +} + +define void @shuffle_ext_byone_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: mov z2.h, z1.h[7] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: insr z1.h, w8 +; CHECK-NEXT: insr z0.h, w9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %ret = shufflevector <16 x i16> %op1, <16 x i16> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30> + store <16 x i16> %ret, <16 x i16>* %a + ret void +} + +define <2 x i32> @shuffle_ext_byone_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.s, w8 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <2 x i32> %op1, <2 x i32> %op2, <2 x i32> <i32 1, i32 2> + ret <2 x i32> %ret +} + +define <4 x i32> @shuffle_ext_byone_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: insr z1.s, w8 +; CHECK-NEXT: orr z0.d, z1.d, z1.d +;
CHECK-NEXT: ret + %ret = shufflevector <4 x i32> %op1, <4 x i32> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6> + ret <4 x i32> %ret +} + +define void @shuffle_ext_byone_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: mov z2.s, z1.s[3] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: insr z1.s, w8 +; CHECK-NEXT: insr z0.s, w9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %ret = shufflevector <8 x i32> %op1, <8 x i32> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14> + store <8 x i32> %ret, <8 x i32>* %a + ret void +} + +define <2 x i64> @shuffle_ext_byone_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: insr z1.d, x8 +; CHECK-NEXT: orr z0.d, z1.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <2 x i64> %op1, <2 x i64> %op2, <2 x i32> <i32 1, i32 2> + ret <2 x i64> %ret +} + +define void @shuffle_ext_byone_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: mov z2.d, z1.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: insr z1.d, x8 +; CHECK-NEXT: insr z0.d, x9 +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %ret = shufflevector <4 x i64> %op1, <4 x i64> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6> + store <4 x i64> %ret, <4 x i64>* %a + ret void +} + + +define <4 x half> @shuffle_ext_byone_v4f16(<4 x half> %op1, <4 x half> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: insr z1.h, h0 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <4 x half> %op1, <4 x half> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6> + ret <4 x half> %ret +} + +define <8 x half> @shuffle_ext_byone_v8f16(<8 x half> %op1, <8 x half> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: insr z1.h, h0 +; CHECK-NEXT: orr z0.d, z1.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <8 x half> %op1, <8 x half> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14> + ret <8 x half> %ret +} + +define void @shuffle_ext_byone_v16f16(<16 x half>* %a, <16 x half>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: mov z3.h, z1.h[7] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: insr z2.h, h3 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: insr z1.h, h0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x half>, <16 x half>* %a + %op2 = load <16 x half>, <16 x half>* %b + %ret = shufflevector <16 x half> %op1, <16 x half> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30> + store <16 x half> %ret, <16 x half>* %a + ret void +} + +define <2 x float> @shuffle_ext_byone_v2f32(<2 x float> %op1, <2 x float> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2f32: +; CHECK: // %bb.0: +;
CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: insr z1.s, s0 +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret + %ret = shufflevector <2 x float> %op1, <2 x float> %op2, <2 x i32> <i32 1, i32 2> + ret <2 x float> %ret +} + +define <4 x float> @shuffle_ext_byone_v4f32(<4 x float> %op1, <4 x float> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: insr z1.s, s0 +; CHECK-NEXT: orr z0.d, z1.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <4 x float> %op1, <4 x float> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6> + ret <4 x float> %ret +} + +define void @shuffle_ext_byone_v8f32(<8 x float>* %a, <8 x float>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: mov z3.s, z1.s[3] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: insr z2.s, s3 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: insr z1.s, s0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x float>, <8 x float>* %a + %op2 = load <8 x float>, <8 x float>* %b + %ret = shufflevector <8 x float> %op1, <8 x float> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14> + store <8 x float> %ret, <8 x float>* %a + ret void +} + +define <2 x double> @shuffle_ext_byone_v2f64(<2 x double> %op1, <2 x double> %op2) #0 { +; CHECK-LABEL: shuffle_ext_byone_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: insr z1.d, d0 +; CHECK-NEXT: orr z0.d, z1.d, z1.d +; CHECK-NEXT: ret + %ret = shufflevector <2 x double> %op1, <2 x double> %op2, <2 x i32> <i32 1, i32 2> + ret <2 x double> %ret +} + +define void @shuffle_ext_byone_v4f64(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x1] +; CHECK-NEXT: mov z3.d, z1.d[1] +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: insr z2.d, d3 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: insr z1.d, d0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +define void @shuffle_ext_byone_reverse(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_byone_reverse: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: mov z3.d, z1.d[1] +; CHECK-NEXT: ldr q0, [x1, #16] +; CHECK-NEXT: insr z2.d, d3 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: insr z1.d, d0 +; CHECK-NEXT: stp q1, q2, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 7, i32 0, i32 1, i32 2> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 { +; CHECK-LABEL: shuffle_ext_invalid: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x0, #16] +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %op2 = load <4 x double>, <4 x double>* %b + %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 2, i32 3, i32 4, i32 5> + store <4 x double> %ret, <4 x double>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" }
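
NOTE FOR REVIEWERS: all five test files above carry autogenerated assertions, so after any further change to the streaming-compatible lowering the CHECK lines should be regenerated with utils/update_llc_test_checks.py rather than edited by hand. A typical invocation looks like the sketch below; the ./build paths are illustrative assumptions about an in-tree CMake build, not part of this patch:

  $ ./build/bin/llvm-lit llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll
  $ llvm/utils/update_llc_test_checks.py --llc-binary ./build/bin/llc llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-*.ll

The first command runs one of the new tests under lit; the second rewrites the autogenerated CHECK blocks from the current llc output, which keeps the expected streaming-compatible SVE sequences in sync with the lowering changes in this patch.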