diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1620,6 +1620,17 @@ setOperationAction(ISD::MULHU, VT, Custom); setOperationAction(ISD::ABS, VT, Custom); setOperationAction(ISD::XOR, VT, Custom); + setOperationAction(ISD::CTLZ, VT, Custom); + setOperationAction(ISD::SMIN, VT, Custom); + setOperationAction(ISD::SMAX, VT, Custom); + setOperationAction(ISD::UMIN, VT, Custom); + setOperationAction(ISD::UMAX, VT, Custom); + setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); + setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); + setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); + setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); + setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); + setOperationAction(ISD::TRUNCATE, VT, Custom); } void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) { @@ -8414,7 +8425,7 @@ assert(!IsParity && "ISD::PARITY of vector types not supported"); - if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) + if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, Subtarget->forceStreamingCompatibleSVE())) return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU); assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 || @@ -12202,7 +12213,7 @@ SelectionDAG &DAG) const { assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!"); - if (useSVEForFixedLengthVectorVT(Op.getValueType())) + if (useSVEForFixedLengthVectorVT(Op.getValueType(), Subtarget->forceStreamingCompatibleSVE())) return LowerFixedLengthInsertVectorElt(Op, DAG); // Check for non-constant or out of range lane. @@ -12618,7 +12629,7 @@ if (!VT.isVector() || VT.isScalableVector()) return SDValue(); - if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType())) + if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(), Subtarget->forceStreamingCompatibleSVE())) return LowerFixedLengthVectorTruncateToSVE(Op, DAG); return SDValue(); @@ -12789,7 +12800,7 @@ if (Op.getValueType().isScalableVector()) return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO); - if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType())) + if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(), Subtarget->forceStreamingCompatibleSVE())) return LowerFixedLengthVectorSetccToSVE(Op, DAG); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); @@ -12871,6 +12882,11 @@ Op.getOpcode() == ISD::VECREDUCE_OR || Op.getOpcode() == ISD::VECREDUCE_XOR || Op.getOpcode() == ISD::VECREDUCE_FADD || + Op.getOpcode() == ISD::VECREDUCE_SMAX || + Op.getOpcode() == ISD::VECREDUCE_SMIN || + Op.getOpcode() == ISD::VECREDUCE_UMAX || + Op.getOpcode() == ISD::VECREDUCE_UMIN || + Op.getOpcode() == ISD::VECREDUCE_ADD || (Op.getOpcode() != ISD::VECREDUCE_ADD && SrcVT.getVectorElementType() == MVT::i64); if (SrcVT.isScalableVector() || @@ -15523,7 +15539,8 @@ // The combining code currently only works for NEON vectors. In particular, // it does not work for SVE when dealing with vectors wider than 128 bits.
- if (!VT.is64BitVector() && !VT.is128BitVector()) + if ((!VT.is64BitVector() && !VT.is128BitVector()) || + DAG.getSubtarget().forceStreamingCompatibleSVE()) return SDValue(); SDValue N0 = N->getOperand(0); @@ -22655,7 +22672,7 @@ EVT InVT = Op.getOperand(0).getValueType(); EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT); - assert(useSVEForFixedLengthVectorVT(InVT) && + assert(useSVEForFixedLengthVectorVT(InVT, Subtarget->forceStreamingCompatibleSVE()) && "Only expected to lower fixed length vector operation!"); assert(Op.getValueType() == InVT.changeTypeToInteger() && "Expected integer result of the same bit length as the inputs!"); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -3653,7 +3653,11 @@ if (AArch64::FPR128RegClass.contains(DestReg) && AArch64::FPR128RegClass.contains(SrcReg)) { - if (Subtarget.hasNEON()) { + if (Subtarget.forceStreamingCompatibleSVE()) { + BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ), AArch64::Z0 + (DestReg - AArch64::Q0)) + .addReg(AArch64::Z0 + (SrcReg - AArch64::Q0)) + .addReg(AArch64::Z0 + (SrcReg - AArch64::Q0)); + } else if (Subtarget.hasNEON()) { BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg) .addReg(SrcReg) .addReg(SrcReg, getKillRegState(KillSrc)); diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h @@ -261,6 +261,8 @@ bool isLegalMaskedGatherScatter(Type *DataType) const { if (!ST->hasSVE()) return false; + if (ST->forceStreamingCompatibleSVE()) + return false; // For fixed vectors, scalarize if not using SVE for them.
auto *DataTypeFVTy = dyn_cast<FixedVectorType>(DataType); diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll @@ -13,8 +13,11 @@ define i8 @uaddv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uaddv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: addv b0, v0.8b -; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uaddv d0, p0, z0.b +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a) ret i8 %res @@ -24,8 +27,11 @@ define i8 @uaddv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uaddv_v16i8: ; CHECK: // %bb.0: -; CHECK-NEXT: addv b0, v0.16b -; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: uaddv d0, p0, z0.b +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %a) ret i8 %res @@ -103,8 +109,11 @@ define i16 @uaddv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uaddv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: addv h0, v0.4h -; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uaddv d0, p0, z0.h +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a) ret i16 %res @@ -114,8 +123,11 @@ define i16 @uaddv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uaddv_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: addv h0, v0.8h -; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: uaddv d0, p0, z0.h +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %a) ret i16 %res @@ -193,8 +205,11 @@ define i32 @uaddv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uaddv_v2i32: ; CHECK: // %bb.0: -; CHECK-NEXT: addp v0.2s, v0.2s, v0.2s -; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uaddv d0, p0, z0.s +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %a) ret i32 %res @@ -204,8 +219,11 @@ define i32 @uaddv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uaddv_v4i32: ; CHECK: // %bb.0: -; CHECK-NEXT: addv s0, v0.4s -; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: uaddv d0, p0, z0.s +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) ret i32 %res @@ -294,7 +312,9 @@ define i64 @uaddv_v2i64(<2 x i64> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uaddv_v2i64: ; CHECK: // %bb.0: -; CHECK-NEXT: addp d0, v0.2d +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: uaddv d0, p0, z0.d ; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret %res = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a) @@ -372,7 +392,9 @@ define i8
@smaxv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: smaxv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: smaxv b0, v0.8b +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: smaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %a) @@ -383,7 +405,9 @@ define i8 @smaxv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: smaxv_v16i8: ; CHECK: // %bb.0: -; CHECK-NEXT: smaxv b0, v0.16b +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: smaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %a) @@ -457,7 +481,9 @@ define i16 @smaxv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: smaxv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: smaxv h0, v0.4h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: smaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %a) @@ -468,7 +494,9 @@ define i16 @smaxv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: smaxv_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: smaxv h0, v0.8h +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: smaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %a) @@ -542,7 +570,9 @@ define i32 @smaxv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: smaxv_v2i32: ; CHECK: // %bb.0: -; CHECK-NEXT: smaxp v0.2s, v0.2s, v0.2s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: smaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %a) @@ -553,7 +583,9 @@ define i32 @smaxv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: smaxv_v4i32: ; CHECK: // %bb.0: -; CHECK-NEXT: smaxv s0, v0.4s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: smaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a) @@ -718,7 +750,9 @@ define i8 @sminv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: sminv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sminv b0, v0.8b +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: sminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %a) @@ -729,7 +763,9 @@ define i8 @sminv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: sminv_v16i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sminv b0, v0.16b +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: sminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %a) @@ -803,7 +839,9 @@ define i16 @sminv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: sminv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sminv h0, v0.4h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: sminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %a) @@ -814,7 +852,9 @@ define i16 @sminv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: sminv_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sminv h0, v0.8h +; 
CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: sminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %a) @@ -888,7 +928,9 @@ define i32 @sminv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: sminv_v2i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sminp v0.2s, v0.2s, v0.2s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: sminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %a) @@ -899,7 +941,9 @@ define i32 @sminv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: sminv_v4i32: ; CHECK: // %bb.0: -; CHECK-NEXT: sminv s0, v0.4s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: sminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a) @@ -1064,7 +1108,9 @@ define i8 @umaxv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: umaxv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: umaxv b0, v0.8b +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: umaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %a) @@ -1075,7 +1121,9 @@ define i8 @umaxv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: umaxv_v16i8: ; CHECK: // %bb.0: -; CHECK-NEXT: umaxv b0, v0.16b +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: umaxv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %a) @@ -1149,7 +1197,9 @@ define i16 @umaxv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: umaxv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: umaxv h0, v0.4h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: umaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %a) @@ -1160,7 +1210,9 @@ define i16 @umaxv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: umaxv_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: umaxv h0, v0.8h +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: umaxv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %a) @@ -1234,7 +1286,9 @@ define i32 @umaxv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: umaxv_v2i32: ; CHECK: // %bb.0: -; CHECK-NEXT: umaxp v0.2s, v0.2s, v0.2s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: umaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %a) @@ -1245,7 +1299,9 @@ define i32 @umaxv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: umaxv_v4i32: ; CHECK: // %bb.0: -; CHECK-NEXT: umaxv s0, v0.4s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: umaxv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a) @@ -1410,7 +1466,9 @@ define i8 @uminv_v8i8(<8 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uminv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: uminv b0, v0.8b +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uminv b0, p0, 
z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %a) @@ -1421,7 +1479,9 @@ define i8 @uminv_v16i8(<16 x i8> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uminv_v16i8: ; CHECK: // %bb.0: -; CHECK-NEXT: uminv b0, v0.16b +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: uminv b0, p0, z0.b ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %a) @@ -1495,7 +1555,9 @@ define i16 @uminv_v4i16(<4 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uminv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: uminv h0, v0.4h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %a) @@ -1506,7 +1568,9 @@ define i16 @uminv_v8i16(<8 x i16> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uminv_v8i16: ; CHECK: // %bb.0: -; CHECK-NEXT: uminv h0, v0.8h +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: uminv h0, p0, z0.h ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %a) @@ -1580,7 +1644,9 @@ define i32 @uminv_v2i32(<2 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uminv_v2i32: ; CHECK: // %bb.0: -; CHECK-NEXT: uminp v0.2s, v0.2s, v0.2s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %a) @@ -1591,7 +1657,9 @@ define i32 @uminv_v4i32(<4 x i32> %a) vscale_range(2,0) #0 { ; CHECK-LABEL: uminv_v4i32: ; CHECK: // %bb.0: -; CHECK-NEXT: uminv s0, v0.4s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: uminv s0, p0, z0.s ; CHECK-NEXT: fmov w0, s0 ; CHECK-NEXT: ret %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-compatible-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-compatible-fixed-length-int-vselect.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-compatible-fixed-length-int-vselect.ll @@ -0,0 +1,356 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define <4 x i8> @select_v4i8(<4 x i8> %op1, <4 x i8> %op2, <4 x i1> %mask) #0 { +; CHECK-LABEL: select_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: adrp x9, .LCPI0_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d3, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: ldr d4, [x9, :lo12:.LCPI0_1] +; CHECK-NEXT: lsl z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: asr z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %sel = select <4 x i1> %mask, <4 x i8> %op1, <4 x i8> %op2 + ret <4 x i8> %sel +} + +define <8 x i8> @select_v8i8(<8 x i8> %op1, <8 x i8> %op2, <8 x i1> %mask) #0 { +; CHECK-LABEL: select_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI1_0 +; 
CHECK-NEXT: adrp x9, .LCPI1_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d3, [x8, :lo12:.LCPI1_0] +; CHECK-NEXT: ldr d4, [x9, :lo12:.LCPI1_1] +; CHECK-NEXT: lsl z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: asr z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %sel = select <8 x i1> %mask, <8 x i8> %op1, <8 x i8> %op2 + ret <8 x i8> %sel +} + +define <16 x i8> @select_v16i8(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask) #0 { +; CHECK-LABEL: select_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: adrp x9, .LCPI2_1 +; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI2_1] +; CHECK-NEXT: lsl z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: asr z2.b, p0/m, z2.b, z3.b +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %sel = select <16 x i1> %mask, <16 x i8> %op1, <16 x i8> %op2 + ret <16 x i8> %sel +} + +define void @select_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: select_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: cmpeq p1.b, p0/z, z2.b, z0.b +; CHECK-NEXT: cmpeq p0.b, p0/z, z3.b, z1.b +; CHECK-NEXT: mov z5.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z6.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: and z2.d, z2.d, z5.d +; CHECK-NEXT: eor z5.d, z5.d, z4.d +; CHECK-NEXT: eor z4.d, z6.d, z4.d +; CHECK-NEXT: and z3.d, z3.d, z6.d +; CHECK-NEXT: and z1.d, z1.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z5.d +; CHECK-NEXT: orr z1.d, z3.d, z1.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %mask = icmp eq <32 x i8> %op1, %op2 + %sel = select <32 x i1> %mask, <32 x i8> %op1, <32 x i8> %op2 + store <32 x i8> %sel, <32 x i8>* %a + ret void +} + +define <2 x i16> @select_v2i16(<2 x i16> %op1, <2 x i16> %op2, <2 x i1> %mask) #0 { +; CHECK-LABEL: select_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: adrp x9, .LCPI4_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d3, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: ldr d4, [x9, :lo12:.LCPI4_1] +; CHECK-NEXT: lsl z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: asr z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %sel = select <2 x i1> %mask, <2 x i16> %op1, <2 x i16> %op2 + ret <2 x i16> %sel +} + +define <4 x i16> @select_v4i16(<4 x i16> %op1, <4 x i16> %op2, <4 x i1> 
%mask) #0 { +; CHECK-LABEL: select_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: adrp x9, .LCPI5_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d3, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: ldr d4, [x9, :lo12:.LCPI5_1] +; CHECK-NEXT: lsl z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: asr z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %sel = select <4 x i1> %mask, <4 x i16> %op1, <4 x i16> %op2 + ret <4 x i16> %sel +} + +define <8 x i16> @select_v8i16(<8 x i16> %op1, <8 x i16> %op2, <8 x i1> %mask) #0 { +; CHECK-LABEL: select_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI6_0 +; CHECK-NEXT: adrp x9, .LCPI6_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: uunpklo z2.h, z2.b +; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI6_0] +; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI6_1] +; CHECK-NEXT: lsl z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: asr z2.h, p0/m, z2.h, z3.h +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %sel = select <8 x i1> %mask, <8 x i16> %op1, <8 x i16> %op2 + ret <8 x i16> %sel +} + +define void @select_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: select_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: cmpeq p1.h, p0/z, z2.h, z0.h +; CHECK-NEXT: cmpeq p0.h, p0/z, z3.h, z1.h +; CHECK-NEXT: mov z5.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z6.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: and z2.d, z2.d, z5.d +; CHECK-NEXT: eor z5.d, z5.d, z4.d +; CHECK-NEXT: eor z4.d, z6.d, z4.d +; CHECK-NEXT: and z3.d, z3.d, z6.d +; CHECK-NEXT: and z1.d, z1.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z5.d +; CHECK-NEXT: orr z1.d, z3.d, z1.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %mask = icmp eq <16 x i16> %op1, %op2 + %sel = select <16 x i1> %mask, <16 x i16> %op1, <16 x i16> %op2 + store <16 x i16> %sel, <16 x i16>* %a + ret void +} + +define <2 x i32> @select_v2i32(<2 x i32> %op1, <2 x i32> %op2, <2 x i1> %mask) #0 { +; CHECK-LABEL: select_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI8_0 +; CHECK-NEXT: adrp x9, .LCPI8_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ldr d3, [x8, :lo12:.LCPI8_0] +; CHECK-NEXT: ldr d4, [x9, :lo12:.LCPI8_1] +; CHECK-NEXT: lsl z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: asr z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %sel = select <2 
x i1> %mask, <2 x i32> %op1, <2 x i32> %op2 + ret <2 x i32> %sel +} + +define <4 x i32> @select_v4i32(<4 x i32> %op1, <4 x i32> %op2, <4 x i1> %mask) #0 { +; CHECK-LABEL: select_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI9_0 +; CHECK-NEXT: adrp x9, .LCPI9_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: uunpklo z2.s, z2.h +; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI9_0] +; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI9_1] +; CHECK-NEXT: lsl z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: asr z2.s, p0/m, z2.s, z3.s +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %sel = select <4 x i1> %mask, <4 x i32> %op1, <4 x i32> %op2 + ret <4 x i32> %sel +} + +define void @select_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: select_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: adrp x8, .LCPI10_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI10_0] +; CHECK-NEXT: cmpeq p1.s, p0/z, z2.s, z0.s +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z5.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z6.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: and z2.d, z2.d, z5.d +; CHECK-NEXT: eor z5.d, z5.d, z4.d +; CHECK-NEXT: eor z4.d, z6.d, z4.d +; CHECK-NEXT: and z3.d, z3.d, z6.d +; CHECK-NEXT: and z1.d, z1.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z5.d +; CHECK-NEXT: orr z1.d, z3.d, z1.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %mask = icmp eq <8 x i32> %op1, %op2 + %sel = select <8 x i1> %mask, <8 x i32> %op1, <8 x i32> %op2 + store <8 x i32> %sel, <8 x i32>* %a + ret void +} + +define <1 x i64> @select_v1i64(<1 x i64> %op1, <1 x i64> %op2, <1 x i1> %mask) #0 { +; CHECK-LABEL: select_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: tst w0, #0x1 +; CHECK-NEXT: mov x9, #-1 +; CHECK-NEXT: csetm x8, ne +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: fmov d3, x9 +; CHECK-NEXT: fmov d2, x8 +; CHECK-NEXT: eor z3.d, z2.d, z3.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %sel = select <1 x i1> %mask, <1 x i64> %op1, <1 x i64> %op2 + ret <1 x i64> %sel +} + +define <2 x i64> @select_v2i64(<2 x i64> %op1, <2 x i64> %op2, <2 x i1> %mask) #0 { +; CHECK-LABEL: select_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI12_0 +; CHECK-NEXT: adrp x9, .LCPI12_1 +; CHECK-NEXT: // kill: def $d2 killed $d2 def $z2 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: uunpklo z2.d, z2.s +; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI12_0] +; CHECK-NEXT: ldr q4, [x9, :lo12:.LCPI12_1] +; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: asr z2.d, p0/m, z2.d, z3.d +; CHECK-NEXT: eor z3.d, z2.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: and z1.d, z1.d, z3.d +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %sel = select <2 x i1> %mask, 
<2 x i64> %op1, <2 x i64> %op2 + ret <2 x i64> %sel +} + +define void @select_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: select_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: adrp x8, .LCPI13_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI13_0] +; CHECK-NEXT: cmpeq p1.d, p0/z, z2.d, z0.d +; CHECK-NEXT: cmpeq p0.d, p0/z, z3.d, z1.d +; CHECK-NEXT: mov z5.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z6.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: and z2.d, z2.d, z5.d +; CHECK-NEXT: eor z5.d, z5.d, z4.d +; CHECK-NEXT: eor z4.d, z6.d, z4.d +; CHECK-NEXT: and z3.d, z3.d, z6.d +; CHECK-NEXT: and z1.d, z1.d, z4.d +; CHECK-NEXT: and z0.d, z0.d, z5.d +; CHECK-NEXT: orr z1.d, z3.d, z1.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %mask = icmp eq <4 x i64> %op1, %op2 + %sel = select <4 x i1> %mask, <4 x i64> %op1, <4 x i64> %op2 + store <4 x i64> %sel, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" uwtable } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bit-counting.ll @@ -0,0 +1,638 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; CLZ +; + +define <4 x i8> @ctlz_v4i8(<4 x i8> %op) #0 { +; CHECK-LABEL: ctlz_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: adrp x9, .LCPI0_1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: ldr d2, [x9, :lo12:.LCPI0_1] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: sub z0.h, z0.h, z2.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.ctlz.v4i8(<4 x i8> %op) + ret <4 x i8> %res +} + +define <8 x i8> @ctlz_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: ctlz_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %op) + ret <8 x i8> %res +} + +define <16 x i8> @ctlz_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: ctlz_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %op) + ret <16 x i8> %res +} + +define void @ctlz_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: ctlz_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: clz z1.b, p0/m, z1.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %op) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @ctlz_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: ctlz_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, 
.LCPI4_0 +; CHECK-NEXT: adrp x9, .LCPI4_1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: ldr d2, [x9, :lo12:.LCPI4_1] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: sub z0.s, z0.s, z2.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @ctlz_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: ctlz_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @ctlz_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: ctlz_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define void @ctlz_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: ctlz_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: clz z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @ctlz_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: ctlz_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @ctlz_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: ctlz_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @ctlz_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: ctlz_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: clz z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @ctlz_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: ctlz_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.ctlz.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @ctlz_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: ctlz_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %op) + ret <2 x i64> 
%res +} + +define void @ctlz_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: ctlz_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: clz z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; CNT +; + +define <4 x i8> @ctpop_v4i8(<4 x i8> %op) #0 { +; CHECK-LABEL: ctpop_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.ctpop.v4i8(<4 x i8> %op) + ret <4 x i8> %res +} + +define <8 x i8> @ctpop_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: ctpop_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: cnt v0.8b, v0.8b +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %op) + ret <8 x i8> %res +} + +define <16 x i8> @ctpop_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: ctpop_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: cnt v0.16b, v0.16b +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %op) + ret <16 x i8> %res +} + +define void @ctpop_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: ctpop_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: cnt v0.16b, v0.16b +; CHECK-NEXT: cnt v1.16b, v1.16b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %op) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @ctpop_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: ctpop_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI18_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI18_0] +; CHECK-NEXT: and z0.d, z0.d, z1.d +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.ctpop.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @ctpop_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: ctpop_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @ctpop_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: ctpop_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define void @ctpop_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: ctpop_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: cnt z0.h, p0/m, z0.h +; CHECK-NEXT: cnt z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @ctpop_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: ctpop_v2i32: +; 
CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @ctpop_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: ctpop_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @ctpop_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: ctpop_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: cnt z0.s, p0/m, z0.s +; CHECK-NEXT: cnt z1.s, p0/m, z1.s +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @ctpop_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: ctpop_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: cnt z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @ctpop_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: ctpop_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: cnt z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %op) + ret <2 x i64> %res +} + +define void @ctpop_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: ctpop_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: cnt z0.d, p0/m, z0.d +; CHECK-NEXT: cnt z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; Count trailing zeros +; + +define <4 x i8> @cttz_v4i8(<4 x i8> %op) #0 { +; CHECK-LABEL: cttz_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI28_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI28_0] +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i8> @llvm.cttz.v4i8(<4 x i8> %op) + ret <4 x i8> %res +} + +define <8 x i8> @cttz_v8i8(<8 x i8> %op) #0 { +; CHECK-LABEL: cttz_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.cttz.v8i8(<8 x i8> %op) + ret <8 x i8> %res +} + +define <16 x i8> @cttz_v16i8(<16 x i8> %op) #0 { +; CHECK-LABEL: cttz_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res 
= call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %op) + ret <16 x i8> %res +} + +define void @cttz_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: cttz_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: rbit z0.b, p0/m, z0.b +; CHECK-NEXT: clz z0.b, p0/m, z0.b +; CHECK-NEXT: rbit z1.b, p0/m, z1.b +; CHECK-NEXT: clz z1.b, p0/m, z1.b +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %op) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <2 x i16> @cttz_v2i16(<2 x i16> %op) #0 { +; CHECK-LABEL: cttz_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI32_0 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI32_0] +; CHECK-NEXT: orr z0.d, z0.d, z1.d +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i16> @llvm.cttz.v2i16(<2 x i16> %op) + ret <2 x i16> %res +} + +define <4 x i16> @cttz_v4i16(<4 x i16> %op) #0 { +; CHECK-LABEL: cttz_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.cttz.v4i16(<4 x i16> %op) + ret <4 x i16> %res +} + +define <8 x i16> @cttz_v8i16(<8 x i16> %op) #0 { +; CHECK-LABEL: cttz_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %op) + ret <8 x i16> %res +} + +define void @cttz_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: cttz_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: rbit z0.h, p0/m, z0.h +; CHECK-NEXT: clz z0.h, p0/m, z0.h +; CHECK-NEXT: rbit z1.h, p0/m, z1.h +; CHECK-NEXT: clz z1.h, p0/m, z1.h +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %op) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @cttz_v2i32(<2 x i32> %op) #0 { +; CHECK-LABEL: cttz_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %op) + ret <2 x i32> %res +} + +define <4 x i32> @cttz_v4i32(<4 x i32> %op) #0 { +; CHECK-LABEL: cttz_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %op) + ret <4 x i32> %res +} + +define void @cttz_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: cttz_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: rbit z0.s, p0/m, z0.s +; CHECK-NEXT: clz z0.s, p0/m, z0.s +; CHECK-NEXT: rbit z1.s, p0/m, z1.s +; CHECK-NEXT: clz z1.s, p0/m, z1.s +; 
CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %op) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define <1 x i64> @cttz_v1i64(<1 x i64> %op) #0 { +; CHECK-LABEL: cttz_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %op) + ret <1 x i64> %res +} + +define <2 x i64> @cttz_v2i64(<2 x i64> %op) #0 { +; CHECK-LABEL: cttz_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %op) + ret <2 x i64> %res +} + +define void @cttz_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: cttz_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: rbit z0.d, p0/m, z0.d +; CHECK-NEXT: clz z0.d, p0/m, z0.d +; CHECK-NEXT: rbit z1.d, p0/m, z1.d +; CHECK-NEXT: clz z1.d, p0/m, z1.d +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %op) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } + +declare <4 x i8> @llvm.ctlz.v4i8(<4 x i8>) +declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>) +declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>) +declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>) +declare <2 x i16> @llvm.ctlz.v2i16(<2 x i16>) +declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>) +declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>) +declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>) +declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>) +declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>) +declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>) +declare <1 x i64> @llvm.ctlz.v1i64(<1 x i64>) +declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>) +declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>) + +declare <4 x i8> @llvm.ctpop.v4i8(<4 x i8>) +declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) +declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) +declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>) +declare <2 x i16> @llvm.ctpop.v2i16(<2 x i16>) +declare <4 x i16> @llvm.ctpop.v4i16(<4 x i16>) +declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) +declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>) +declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) +declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) +declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) +declare <1 x i64> @llvm.ctpop.v1i64(<1 x i64>) +declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) +declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) + +declare <4 x i8> @llvm.cttz.v4i8(<4 x i8>) +declare <8 x i8> @llvm.cttz.v8i8(<8 x i8>) +declare <16 x i8> @llvm.cttz.v16i8(<16 x i8>) +declare <32 x i8> @llvm.cttz.v32i8(<32 x i8>) +declare <2 x i16> @llvm.cttz.v2i16(<2 x i16>) +declare <4 x i16> @llvm.cttz.v4i16(<4 x i16>) +declare <8 x i16> @llvm.cttz.v8i16(<8 x i16>) +declare <16 x i16> @llvm.cttz.v16i16(<16 x i16>) +declare <2 x i32> @llvm.cttz.v2i32(<2 x i32>) +declare <4 x i32> @llvm.cttz.v4i32(<4 x i32>) +declare <8 x i32> @llvm.cttz.v8i32(<8 x i32>) +declare <1 x i64> @llvm.cttz.v1i64(<1 x i64>) +declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>) +declare <4 x i64> @llvm.cttz.v4i64(<4 x i64>) diff --git 
a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitselect.ll @@ -0,0 +1,47 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64" + +; +; NOTE: SVE lowering for the BSP pseudoinst is not currently implemented, so we +; don't currently expect the code below to lower to BSL/BIT/BIF. Once +; this is implemented, this test will be fleshed out. +; + +define <8 x i32> @fixed_bitselect_v8i32(<8 x i32>* %pre_cond_ptr, <8 x i32>* %left_ptr, <8 x i32>* %right_ptr) #0 { +; CHECK-LABEL: fixed_bitselect_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: adrp x8, .LCPI0_1 +; CHECK-NEXT: ldp q3, q4, [x1] +; CHECK-NEXT: sub z6.s, z2.s, z1.s +; CHECK-NEXT: sub z2.s, z2.s, z0.s +; CHECK-NEXT: and z3.d, z6.d, z3.d +; CHECK-NEXT: ldp q7, q16, [x2] +; CHECK-NEXT: and z2.d, z2.d, z4.d +; CHECK-NEXT: ldr q5, [x8, :lo12:.LCPI0_1] +; CHECK-NEXT: add z1.s, z1.s, z5.s +; CHECK-NEXT: add z0.s, z0.s, z5.s +; CHECK-NEXT: and z4.d, z0.d, z16.d +; CHECK-NEXT: and z0.d, z1.d, z7.d +; CHECK-NEXT: orr z0.d, z0.d, z3.d +; CHECK-NEXT: orr z1.d, z4.d, z2.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %pre_cond = load <8 x i32>, <8 x i32>* %pre_cond_ptr + %left = load <8 x i32>, <8 x i32>* %left_ptr + %right = load <8 x i32>, <8 x i32>* %right_ptr + + %neg_cond = sub <8 x i32> zeroinitializer, %pre_cond + %min_cond = add <8 x i32> %pre_cond, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1> + %left_bits_0 = and <8 x i32> %neg_cond, %left + %right_bits_0 = and <8 x i32> %min_cond, %right + %bsl0000 = or <8 x i32> %right_bits_0, %left_bits_0 + ret <8 x i32> %bsl0000 +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll @@ -0,0 +1,414 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; insertelement +; + +; i8 +define <4 x i8> @insertelement_v4i8(<4 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x i8> %op1, i8 5, i64 3 + ret <4 x i8> %r +} + +define <8 x i8> @insertelement_v8i8(<8 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.b, #0, #1 +; CHECK-NEXT: ptrue p0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.b, w8 +; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z1.b +; CHECK-NEXT: mov z0.b, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +;
CHECK-NEXT: ret + %r = insertelement <8 x i8> %op1, i8 5, i64 7 + ret <8 x i8> %r +} + +define <16 x i8> @insertelement_v16i8(<16 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #15 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.b, #0, #1 +; CHECK-NEXT: ptrue p0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.b, w8 +; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z1.b +; CHECK-NEXT: mov z0.b, p0/m, w9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <16 x i8> %op1, i8 5, i64 15 + ret <16 x i8> %r +} + +define <32 x i8> @insertelement_v32i8(<32 x i8> %op1) #0 { +; CHECK-LABEL: insertelement_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #15 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z3.b, #0, #1 +; CHECK-NEXT: ptrue p0.b +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z2.b, w8 +; CHECK-NEXT: cmpeq p0.b, p0/z, z3.b, z2.b +; CHECK-NEXT: mov z1.b, p0/m, w9 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %r = insertelement <32 x i8> %op1, i8 5, i64 31 + ret <32 x i8> %r +} + +; i16 +define <2 x i16> @insertelement_v2i16(<2 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x i16> %op1, i16 5, i64 1 + ret <2 x i16> %r +} + +define <4 x i16> @insertelement_v4i16(<4 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x i16> %op1, i16 5, i64 3 + ret <4 x i16> %r +} + +define <8 x i16> @insertelement_v8i16(<8 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p0/m, w9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <8 x i16> %op1, i16 5, i64 7 + ret <8 x i16> %r +} + +define <16 x i16> @insertelement_v16i16(<16 x i16> %op1) #0 { +; CHECK-LABEL: insertelement_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z3.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: mov z2.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z3.h, z2.h +; CHECK-NEXT: mov z1.h, p0/m, w9 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %r = insertelement <16 x i16> %op1, i16 5, i64 15 + ret <16 x i16> %r +} + +;i32 +define <2 x i32> @insertelement_v2i32(<2 x i32> %op1) #0 { +; CHECK-LABEL: insertelement_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // 
kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z1.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p0/m, w9 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x i32> %op1, i32 5, i64 1 + ret <2 x i32> %r +} + +define <4 x i32> @insertelement_v4i32(<4 x i32> %op1) #0 { +; CHECK-LABEL: insertelement_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p0/m, w9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x i32> %op1, i32 5, i64 3 + ret <4 x i32> %r +} + +define <8 x i32> @insertelement_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: insertelement_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: index z3.s, #0, #1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mov z2.s, w8 +; CHECK-NEXT: mov w8, #5 +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z2.s +; CHECK-NEXT: mov z1.s, p0/m, w8 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %r = insertelement <8 x i32> %op1, i32 5, i64 7 + ret <8 x i32> %r +} + +;i64 +define <1 x i64> @insertelement_v1i64(<1 x i64> %op1) #0 { +; CHECK-LABEL: insertelement_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #5 +; CHECK-NEXT: fmov d0, x8 +; CHECK-NEXT: ret + %r = insertelement <1 x i64> %op1, i64 5, i64 0 + ret <1 x i64> %r +} + +define <2 x i64> @insertelement_v2i64(<2 x i64> %op1) #0 { +; CHECK-LABEL: insertelement_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: mov w9, #5 +; CHECK-NEXT: index z2.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z1.d, x8 +; CHECK-NEXT: cmpeq p0.d, p0/z, z2.d, z1.d +; CHECK-NEXT: mov z0.d, p0/m, x9 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x i64> %op1, i64 5, i64 1 + ret <2 x i64> %r +} + +define <4 x i64> @insertelement_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: insertelement_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: index z3.d, #0, #1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov z2.d, x8 +; CHECK-NEXT: mov w8, #5 +; CHECK-NEXT: cmpeq p0.d, p0/z, z3.d, z2.d +; CHECK-NEXT: mov z1.d, p0/m, x8 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %r = insertelement <4 x i64> %op1, i64 5, i64 3 + ret <4 x i64> %r +} + +;f16 +define <2 x half> @insertelement_v2f16(<2 x half> %op1) #0 { +; CHECK-LABEL: insertelement_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: fmov h1, #5.00000000 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: str h0, [sp, #8] +; CHECK-NEXT: str h1, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %r = insertelement <2 x half> %op1, half 5.0, i64 1 + ret <2 x half> %r +} + +define <4 x half> @insertelement_v4f16(<4 x half> %op1) #0 { +; CHECK-LABEL: insertelement_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: fmov h1, #5.00000000 +; CHECK-NEXT: index z3.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z2.h, w8 +; 
CHECK-NEXT: cmpeq p0.h, p0/z, z3.h, z2.h +; CHECK-NEXT: mov z0.h, p0/m, h1 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x half> %op1, half 5.0, i64 3 + ret <4 x half> %r +} + +define <8 x half> @insertelement_v8f16(<8 x half> %op1) #0 { +; CHECK-LABEL: insertelement_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: fmov h1, #5.00000000 +; CHECK-NEXT: index z3.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z2.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z3.h, z2.h +; CHECK-NEXT: mov z0.h, p0/m, h1 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <8 x half> %op1, half 5.0, i64 7 + ret <8 x half> %r +} + +define <16 x half> @insertelement_v16f16(<16 x half>* %a) #0 { +; CHECK-LABEL: insertelement_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov w8, #7 +; CHECK-NEXT: fmov h3, #5.00000000 +; CHECK-NEXT: index z4.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: mov z2.h, w8 +; CHECK-NEXT: cmpeq p0.h, p0/z, z4.h, z2.h +; CHECK-NEXT: mov z1.h, p0/m, h3 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <16 x half>, <16 x half>* %a + %r = insertelement <16 x half> %op1, half 5.0, i64 15 + ret <16 x half> %r +} + +;f32 +define <2 x float> @insertelement_v2f32(<2 x float> %op1) #0 { +; CHECK-LABEL: insertelement_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: fmov s1, #5.00000000 +; CHECK-NEXT: index z3.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: mov z2.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z2.s +; CHECK-NEXT: mov z0.s, p0/m, s1 +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x float> %op1, float 5.0, i64 1 + ret <2 x float> %r +} + +define <4 x float> @insertelement_v4f32(<4 x float> %op1) #0 { +; CHECK-LABEL: insertelement_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: fmov s1, #5.00000000 +; CHECK-NEXT: index z3.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z2.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z2.s +; CHECK-NEXT: mov z0.s, p0/m, s1 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <4 x float> %op1, float 5.0, i64 3 + ret <4 x float> %r +} + +define <8 x float> @insertelement_v8f32(<8 x float>* %a) #0 { +; CHECK-LABEL: insertelement_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov w8, #3 +; CHECK-NEXT: fmov s4, #5.00000000 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mov z3.s, w8 +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z3.s +; CHECK-NEXT: mov z1.s, p0/m, s4 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <8 x float>, <8 x float>* %a + %r = insertelement <8 x float> %op1, float 5.0, i64 7 + ret <8 x float> %r +} + +;f64 +define <1 x double> @insertelement_v1f64(<1 x double> %op1) #0 { +; CHECK-LABEL: insertelement_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov d0, #5.00000000 +; CHECK-NEXT: ret + %r = insertelement <1 x double> %op1, double 5.0, i64 0 + ret <1 x double> %r +} + +define <2 x double> @insertelement_v2f64(<2 x double> %op1) #0 { +; CHECK-LABEL: insertelement_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: fmov d1, #5.00000000 +; CHECK-NEXT: index z3.d, #0, #1 +; CHECK-NEXT: 
ptrue p0.d +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: mov z2.d, x8 +; CHECK-NEXT: cmpeq p0.d, p0/z, z3.d, z2.d +; CHECK-NEXT: mov z0.d, p0/m, d1 +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %r = insertelement <2 x double> %op1, double 5.0, i64 1 + ret <2 x double> %r +} + +define <4 x double> @insertelement_v4f64(<4 x double>* %a) #0 { +; CHECK-LABEL: insertelement_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: fmov d3, #5.00000000 +; CHECK-NEXT: index z4.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov z2.d, x8 +; CHECK-NEXT: cmpeq p0.d, p0/z, z4.d, z2.d +; CHECK-NEXT: mov z1.d, p0/m, d3 +; CHECK-NEXT: // kill: def $q1 killed $q1 killed $z1 +; CHECK-NEXT: ret + %op1 = load <4 x double>, <4 x double>* %a + %r = insertelement <4 x double> %op1, double 5.0, i64 3 + ret <4 x double> %r +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-compare.ll @@ -0,0 +1,464 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; ICMP EQ +; + +define <4 x i8> @icmp_eq_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i8> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i8> + ret <4 x i8> %sext +} + +define <8 x i8> @icmp_eq_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <8 x i8> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i8> + ret <8 x i8> %sext +} + +define <16 x i8> @icmp_eq_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 { +; CHECK-LABEL: icmp_eq_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <16 x i8> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i8> + ret <16 x i8> %sext +} + +define void @icmp_eq_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: icmp_eq_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z2.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.b, p0/z, z1.b, 
z3.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %cmp = icmp eq <32 x i8> %op1, %op2 + %sext = sext <32 x i1> %cmp to <32 x i8> + store <32 x i8> %sext, <32 x i8>* %a + ret void +} + +define <2 x i16> @icmp_eq_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i16> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i16> + ret <2 x i16> %sext +} + +define <4 x i16> @icmp_eq_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i16> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i16> + ret <4 x i16> %sext +} + +define <8 x i16> @icmp_eq_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 { +; CHECK-LABEL: icmp_eq_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <8 x i16> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i16> + ret <8 x i16> %sext +} + +define void @icmp_eq_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_eq_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp eq <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +define <2 x i32> @icmp_eq_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i32> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i32> + ret <2 x i32> %sext +} + +define <4 x i32> @icmp_eq_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 { +; CHECK-LABEL: icmp_eq_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed 
$q0 def $z0 +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <4 x i32> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i32> + ret <4 x i32> %sext +} + +define void @icmp_eq_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_eq_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z2.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp eq <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +define <1 x i64> @icmp_eq_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 { +; CHECK-LABEL: icmp_eq_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <1 x i64> %op1, %op2 + %sext = sext <1 x i1> %cmp to <1 x i64> + ret <1 x i64> %sext +} + +define <2 x i64> @icmp_eq_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 { +; CHECK-LABEL: icmp_eq_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %cmp = icmp eq <2 x i64> %op1, %op2 + %sext = sext <2 x i1> %cmp to <2 x i64> + ret <2 x i64> %sext +} + +define void @icmp_eq_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_eq_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp eq <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP NE +; + +define void @icmp_ne_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { +; CHECK-LABEL: icmp_ne_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpne p1.b, p0/z, z0.b, z2.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %cmp = icmp ne <32 x i8> %op1, %op2 + %sext = sext <32 x i1> %cmp to <32 x i8> + store <32 x i8> %sext, <32 x i8>* %a + ret void +} + +; +; ICMP SGE +; + +define void @icmp_sge_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_sge_v16i16: +; CHECK: // 
%bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpge p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpge p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp sge <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +; +; ICMP SGT +; + +define void @icmp_sgt_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 { +; CHECK-LABEL: icmp_sgt_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpgt p1.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpgt p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %cmp = icmp sgt <16 x i16> %op1, %op2 + %sext = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %sext, <16 x i16>* %a + ret void +} + +; +; ICMP SLE +; + +define void @icmp_sle_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_sle_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpge p1.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpge p0.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp sle <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +; +; ICMP SLT +; + +define void @icmp_slt_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 { +; CHECK-LABEL: icmp_slt_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %cmp = icmp slt <8 x i32> %op1, %op2 + %sext = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %sext, <8 x i32>* %a + ret void +} + +; +; ICMP UGE +; + +define void @icmp_uge_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_uge_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphs p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphs p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp uge <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP UGT +; + +define void @icmp_ugt_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ugt_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, 
q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphi p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ugt <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP ULE +; + +define void @icmp_ule_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ule_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphs p1.d, p0/z, z2.d, z0.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphs p0.d, p0/z, z3.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ule <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +; +; ICMP ULT +; + +define void @icmp_ult_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 { +; CHECK-LABEL: icmp_ult_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z0.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %cmp = icmp ult <4 x i64> %op1, %op2 + %sext = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %sext, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll @@ -10,34 +10,21 @@ define <4 x i8> @sdiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: sdiv_v4i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI0_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.h, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] -; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z2.h -; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: asr z1.h, p0/m, z1.h, z2.h -; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: sunpklo z1.s, z1.h ; CHECK-NEXT: sunpklo z0.s, z0.h +; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h 
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sdiv <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -46,8 +33,6 @@ define <8 x i8> @sdiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { ; CHECK-LABEL: sdiv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -60,31 +45,8 @@ ; CHECK-NEXT: sdivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z2.h, z0.h[6] -; CHECK-NEXT: mov z3.h, z0.h[5] -; CHECK-NEXT: mov z4.h, z0.h[4] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z5.h, z0.h[3] -; CHECK-NEXT: mov z6.h, z0.h[2] -; CHECK-NEXT: mov z0.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sdiv <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -196,27 +158,14 @@ define <4 x i16> @sdiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: sdiv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: sunpklo z1.s, z1.h ; CHECK-NEXT: sunpklo z0.s, z0.h ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = sdiv <4 x i16> %op1, %op2 ret <4 x i16> %res @@ -363,31 +312,18 @@ define <4 x i8> @udiv_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: udiv_v4i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI14_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI14_0] -; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: uunpklo z1.s, z1.h ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add 
sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = udiv <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -396,8 +332,6 @@ define <8 x i8> @udiv_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { ; CHECK-LABEL: udiv_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -410,31 +344,8 @@ ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z2.h, z0.h[6] -; CHECK-NEXT: mov z3.h, z0.h[5] -; CHECK-NEXT: mov z4.h, z0.h[4] -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: mov z5.h, z0.h[3] -; CHECK-NEXT: mov z6.h, z0.h[2] -; CHECK-NEXT: mov z0.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s0 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = udiv <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -544,27 +455,14 @@ define <4 x i16> @udiv_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: udiv_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: uunpklo z1.s, z1.h ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: mov z1.s, z0.s[3] -; CHECK-NEXT: mov z2.s, z0.s[2] -; CHECK-NEXT: mov z0.s, z0.s[1] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: fmov w10, s2 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s0 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d0, [sp, #8] -; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 ; CHECK-NEXT: ret %res = udiv <4 x i16> %op1, %op2 ret <4 x i16> %res diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-immediates.ll @@ -0,0 +1,1142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; Although SVE immediate packing should be fully tested using scalable vectors, +; these tests protect against the possibility that scalable nodes, resulting +; from lowering fixed length vector operations, trigger different isel patterns.
+ +; +; ADD +; + +define void @add_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: add_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: add z1.b, z1.b, z0.b +; CHECK-NEXT: add z0.b, z2.b, z0.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i32 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = add <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @add_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: add_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI1_0] +; CHECK-NEXT: add z1.h, z1.h, z0.h +; CHECK-NEXT: add z0.h, z2.h, z0.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = add <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @add_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: add_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: add z1.s, z1.s, z0.s +; CHECK-NEXT: add z0.s, z2.s, z0.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = add <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @add_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: add_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: add z1.d, z1.d, z0.d +; CHECK-NEXT: add z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = add <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; AND +; + +define void @and_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: and_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i32 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = and <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @and_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: and_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = and <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a 
+ ret void +} + +define void @and_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: and_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI6_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI6_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = and <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @and_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: and_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: and z1.d, z1.d, z0.d +; CHECK-NEXT: and z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = and <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; ASHR +; + +define void @ashr_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: ashr_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI8_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI8_0] +; CHECK-NEXT: asr z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: asrr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i32 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = ashr <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @ashr_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: ashr_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI9_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI9_0] +; CHECK-NEXT: asr z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: asrr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = ashr <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @ashr_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: ashr_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI10_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI10_0] +; CHECK-NEXT: asr z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: asrr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = ashr <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @ashr_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: ashr_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI11_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI11_0] +; CHECK-NEXT: asr z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: asrr z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x 
i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = ashr <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; ICMP +; + +define void @icmp_eq_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: icmp_eq_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI12_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI12_0] +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z0.b +; CHECK-NEXT: cmpeq p0.b, p0/z, z2.b, z0.b +; CHECK-NEXT: mov z0.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %cmp = icmp eq <32 x i8> %op1, %op2 + %res = sext <32 x i1> %cmp to <32 x i8> + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @icmp_sge_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: icmp_sge_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI13_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI13_0] +; CHECK-NEXT: cmpge p1.h, p0/z, z1.h, z0.h +; CHECK-NEXT: cmpge p0.h, p0/z, z2.h, z0.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %cmp = icmp sge <16 x i16> %op1, %op2 + %res = sext <16 x i1> %cmp to <16 x i16> + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @icmp_sgt_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: icmp_sgt_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: cmpgt p1.s, p0/z, z1.s, z0.s +; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 -8, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %cmp = icmp sgt <8 x i32> %op1, %op2 + %res = sext <8 x i1> %cmp to <8 x i32> + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @icmp_ult_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: icmp_ult_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI15_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI15_0] +; CHECK-NEXT: cmphi p1.d, p0/z, z0.d, z1.d +; CHECK-NEXT: cmphi p0.d, p0/z, z0.d, z2.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %cmp = icmp ult <4 x i64> %op1, %op2 + %res = sext <4 x i1> %cmp to <4 x i64> + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; LSHR +; + +define void @lshr_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: lshr_v32i8: +; CHECK: // 
%bb.0: +; CHECK-NEXT: adrp x8, .LCPI16_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI16_0] +; CHECK-NEXT: lsr z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: lsrr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = lshr <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @lshr_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: lshr_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI17_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: lsrr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = lshr <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @lshr_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: lshr_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI18_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI18_0] +; CHECK-NEXT: lsr z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: lsrr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = lshr <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @lshr_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: lshr_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI19_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI19_0] +; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: lsrr z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = lshr <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; MUL +; + +define void @mul_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: mul_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI20_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI20_0] +; CHECK-NEXT: mul z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = mul <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @mul_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: mul_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI21_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI21_0] +; CHECK-NEXT: mul z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x 
i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = mul <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @mul_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: mul_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI22_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI22_0] +; CHECK-NEXT: mul z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = mul <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @mul_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: mul_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI23_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI23_0] +; CHECK-NEXT: mul z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = mul <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; OR +; + +define void @or_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: or_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI24_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI24_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = or <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @or_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: or_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI25_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI25_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = or <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @or_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: or_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI26_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI26_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = or <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @or_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: or_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI27_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI27_0] +; CHECK-NEXT: orr z1.d, z1.d, z0.d +; CHECK-NEXT: orr z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; 
CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = or <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SHL +; + +define void @shl_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: shl_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI28_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI28_0] +; CHECK-NEXT: lsl z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: lslr z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = shl <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @shl_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: shl_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI29_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI29_0] +; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: lslr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = shl <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @shl_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: shl_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI30_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI30_0] +; CHECK-NEXT: lsl z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: lslr z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = shl <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @shl_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: shl_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI31_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI31_0] +; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: lslr z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = shl <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SMAX +; + +define void @smax_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: smax_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI32_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI32_0] +; CHECK-NEXT: smax z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: smax z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @smax_v16i16(<16 x 
i16>* %a) #0 { +; CHECK-LABEL: smax_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI33_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI33_0] +; CHECK-NEXT: smax z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: smax z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @smax_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: smax_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI34_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI34_0] +; CHECK-NEXT: smax z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: smax z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @smax_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: smax_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI35_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI35_0] +; CHECK-NEXT: smax z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: smax z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SMIN +; + +define void @smin_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: smin_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI36_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI36_0] +; CHECK-NEXT: smin z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: smin z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @smin_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: smin_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI37_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI37_0] +; CHECK-NEXT: smin z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: smin z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @smin_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: smin_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI38_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; 
CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI38_0] +; CHECK-NEXT: smin z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: smin z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @smin_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: smin_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI39_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI39_0] +; CHECK-NEXT: smin z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: smin z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SUB +; + +define void @sub_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: sub_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI40_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI40_0] +; CHECK-NEXT: sub z1.b, z1.b, z0.b +; CHECK-NEXT: sub z0.b, z2.b, z0.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = sub <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @sub_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: sub_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI41_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI41_0] +; CHECK-NEXT: sub z1.h, z1.h, z0.h +; CHECK-NEXT: sub z0.h, z2.h, z0.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = sub <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @sub_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: sub_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI42_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI42_0] +; CHECK-NEXT: sub z1.s, z1.s, z0.s +; CHECK-NEXT: sub z0.s, z2.s, z0.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = sub <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @sub_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: sub_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI43_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI43_0] +; CHECK-NEXT: sub z1.d, z1.d, z0.d +; CHECK-NEXT: sub z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = sub <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret 
void +} + +; +; UMAX +; + +define void @umax_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: umax_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI44_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI44_0] +; CHECK-NEXT: umax z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: umax z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @umax_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: umax_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI45_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI45_0] +; CHECK-NEXT: umax z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: umax z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @umax_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: umax_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI46_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI46_0] +; CHECK-NEXT: umax z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: umax z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @umax_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: umax_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI47_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI47_0] +; CHECK-NEXT: umax z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: umax z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; UMIN +; + +define void @umin_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: umin_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI48_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI48_0] +; CHECK-NEXT: umin z1.b, p0/m, z1.b, z0.b +; CHECK-NEXT: umin z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @umin_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: umin_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, 
.LCPI49_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI49_0] +; CHECK-NEXT: umin z1.h, p0/m, z1.h, z0.h +; CHECK-NEXT: umin z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @umin_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: umin_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI50_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI50_0] +; CHECK-NEXT: umin z1.s, p0/m, z1.s, z0.s +; CHECK-NEXT: umin z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @umin_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: umin_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI51_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI51_0] +; CHECK-NEXT: umin z1.d, p0/m, z1.d, z0.d +; CHECK-NEXT: umin z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; XOR +; + +define void @xor_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: xor_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI52_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI52_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %ins = insertelement <32 x i8> undef, i8 7, i64 0 + %op2 = shufflevector <32 x i8> %ins, <32 x i8> undef, <32 x i32> zeroinitializer + %res = xor <32 x i8> %op1, %op2 + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define void @xor_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: xor_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI53_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI53_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %ins = insertelement <16 x i16> undef, i16 15, i64 0 + %op2 = shufflevector <16 x i16> %ins, <16 x i16> undef, <16 x i32> zeroinitializer + %res = xor <16 x i16> %op1, %op2 + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define void @xor_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: xor_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI54_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI54_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %ins = insertelement <8 x i32> undef, i32 31, i64 0 + %op2 = 
shufflevector <8 x i32> %ins, <8 x i32> undef, <8 x i32> zeroinitializer + %res = xor <8 x i32> %op1, %op2 + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +define void @xor_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: xor_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: adrp x8, .LCPI55_0 +; CHECK-NEXT: ldp q1, q2, [x0] +; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI55_0] +; CHECK-NEXT: eor z1.d, z1.d, z0.d +; CHECK-NEXT: eor z0.d, z2.d, z0.d +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %ins = insertelement <4 x i64> undef, i64 63, i64 0 + %op2 = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer + %res = xor <4 x i64> %op1, %op2 + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>) + +declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>) + +declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>) + +declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>) +declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>) +declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>) +declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll @@ -0,0 +1,754 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; SMAX +; + +define <8 x i8> @smax_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.smax.v8i8(<8 x i8> %op1, <8 x i8> %op2) + ret <8 x i8> %res +} + +define <16 x i8> @smax_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.smax.v16i8(<16 x i8> %op1, <16 x i8> %op2) + ret <16 x i8> %res +} + +define void @smax_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load 
<32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <4 x i16> @smax_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.smax.v4i16(<4 x i16> %op1, <4 x i16> %op2) + ret <4 x i16> %res +} + +define <8 x i16> @smax_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.smax.v8i16(<8 x i16> %op1, <8 x i16> %op2) + ret <8 x i16> %res +} + +define void @smax_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @smax_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %op1, <2 x i32> %op2) + ret <2 x i32> %res +} + +define <4 x i32> @smax_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %op1, <4 x i32> %op2) + ret <4 x i32> %res +} + +define void @smax_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +; Vector i64 max are not legal for NEON so use SVE when available. 
+define <1 x i64> @smax_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.smax.v1i64(<1 x i64> %op1, <1 x i64> %op2) + ret <1 x i64> %res +} + +; Vector i64 max are not legal for NEON so use SVE when available. +define <2 x i64> @smax_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %op1, <2 x i64> %op2) + ret <2 x i64> %res +} + +define void @smax_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smax_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; SMIN +; + +define <8 x i8> @smin_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.smin.v8i8(<8 x i8> %op1, <8 x i8> %op2) + ret <8 x i8> %res +} + +define <16 x i8> @smin_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %op1, <16 x i8> %op2) + ret <16 x i8> %res +} + +define void @smin_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <4 x i16> @smin_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.smin.v4i16(<4 x i16> %op1, <4 x i16> %op2) + ret <4 
x i16> %res +} + +define <8 x i16> @smin_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.smin.v8i16(<8 x i16> %op1, <8 x i16> %op2) + ret <8 x i16> %res +} + +define void @smin_v16i16(<16 x i16>* %a, <16 x i16>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @smin_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %op1, <2 x i32> %op2) + ret <2 x i32> %res +} + +define <4 x i32> @smin_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %op1, <4 x i32> %op2) + ret <4 x i32> %res +} + +define void @smin_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +; Vector i64 min are not legal for NEON so use SVE when available. +define <1 x i64> @smin_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.smin.v1i64(<1 x i64> %op1, <1 x i64> %op2) + ret <1 x i64> %res +} + +; Vector i64 min are not legal for NEON so use SVE when available. 
+define <2 x i64> @smin_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %op1, <2 x i64> %op2) + ret <2 x i64> %res +} + +define void @smin_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: smin_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; UMAX +; + +define <8 x i8> @umax_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.umax.v8i8(<8 x i8> %op1, <8 x i8> %op2) + ret <8 x i8> %res +} + +define <16 x i8> @umax_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %op1, <16 x i8> %op2) + ret <16 x i8> %res +} + +define void @umax_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <4 x i16> @umax_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.umax.v4i16(<4 x i16> %op1, <4 x i16> %op2) + ret <4 x i16> %res +} + +define <8 x i16> @umax_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.umax.v8i16(<8 x i16> %op1, <8 x i16> %op2) + ret <8 x i16> %res +} + +define void @umax_v16i16(<16 x i16>* %a, <16 x 
i16>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @umax_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.umax.v2i32(<2 x i32> %op1, <2 x i32> %op2) + ret <2 x i32> %res +} + +define <4 x i32> @umax_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %op1, <4 x i32> %op2) + ret <4 x i32> %res +} + +define void @umax_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +; Vector i64 max are not legal for NEON so use SVE when available. +define <1 x i64> @umax_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.umax.v1i64(<1 x i64> %op1, <1 x i64> %op2) + ret <1 x i64> %res +} + +; Vector i64 max are not legal for NEON so use SVE when available. 
+define <2 x i64> @umax_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %op1, <2 x i64> %op2) + ret <2 x i64> %res +} + +define void @umax_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umax_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +; +; UMIN +; + +define <8 x i8> @umin_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i8> @llvm.umin.v8i8(<8 x i8> %op1, <8 x i8> %op2) + ret <8 x i8> %res +} + +define <16 x i8> @umin_v16i8(<16 x i8> %op1, <16 x i8> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %op1, <16 x i8> %op2) + ret <16 x i8> %res +} + +define void @umin_v32i8(<32 x i8>* %a, <32 x i8>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.b, vl32 +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1] +; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: st1b { z0.b }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <32 x i8>, <32 x i8>* %a + %op2 = load <32 x i8>, <32 x i8>* %b + %res = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %op1, <32 x i8> %op2) + store <32 x i8> %res, <32 x i8>* %a + ret void +} + +define <4 x i16> @umin_v4i16(<4 x i16> %op1, <4 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i16> @llvm.umin.v4i16(<4 x i16> %op1, <4 x i16> %op2) + ret <4 x i16> %res +} + +define <8 x i16> @umin_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <8 x i16> @llvm.umin.v8i16(<8 x i16> %op1, <8 x i16> %op2) + ret <8 x i16> %res +} + +define void @umin_v16i16(<16 x i16>* %a, <16 x 
i16>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1] +; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <16 x i16>, <16 x i16>* %a + %op2 = load <16 x i16>, <16 x i16>* %b + %res = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %op1, <16 x i16> %op2) + store <16 x i16> %res, <16 x i16>* %a + ret void +} + +define <2 x i32> @umin_v2i32(<2 x i32> %op1, <2 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i32> @llvm.umin.v2i32(<2 x i32> %op1, <2 x i32> %op2) + ret <2 x i32> %res +} + +define <4 x i32> @umin_v4i32(<4 x i32> %op1, <4 x i32> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %op1, <4 x i32> %op2) + ret <4 x i32> %res +} + +define void @umin_v8i32(<8 x i32>* %a, <8 x i32>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.s, vl8 +; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1] +; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <8 x i32>, <8 x i32>* %a + %op2 = load <8 x i32>, <8 x i32>* %b + %res = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %op1, <8 x i32> %op2) + store <8 x i32> %res, <8 x i32>* %a + ret void +} + +; Vector i64 min are not legal for NEON so use SVE when available. +define <1 x i64> @umin_v1i64(<1 x i64> %op1, <1 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl1 +; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 +; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 +; CHECK-NEXT: ret + %res = call <1 x i64> @llvm.umin.v1i64(<1 x i64> %op1, <1 x i64> %op2) + ret <1 x i64> %res +} + +; Vector i64 min are not legal for NEON so use SVE when available. 
+define <2 x i64> @umin_v2i64(<2 x i64> %op1, <2 x i64> %op2) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 +; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %res = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %op1, <2 x i64> %op2) + ret <2 x i64> %res +} + +define void @umin_v4i64(<4 x i64>* %a, <4 x i64>* %b) vscale_range(2,0) #0 { +; CHECK-LABEL: umin_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.d, vl4 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ld1d { z1.d }, p0/z, [x1] +; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %op1 = load <4 x i64>, <4 x i64>* %a + %op2 = load <4 x i64>, <4 x i64>* %b + %res = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %op1, <4 x i64> %op2) + store <4 x i64> %res, <4 x i64>* %a + ret void +} + +attributes #0 = { "target-features"="+sve" } + +declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>) +declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>) +declare <32 x i8> @llvm.smin.v32i8(<32 x i8>, <32 x i8>) +declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x i16>) +declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>) +declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>) +declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>) +declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>) +declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>) +declare <1 x i64> @llvm.smin.v1i64(<1 x i64>, <1 x i64>) +declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>) +declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>) + +declare <8 x i8> @llvm.smax.v8i8(<8 x i8>, <8 x i8>) +declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>) +declare <32 x i8> @llvm.smax.v32i8(<32 x i8>, <32 x i8>) +declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>) +declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>) +declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>) +declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>) +declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) +declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>) +declare <1 x i64> @llvm.smax.v1i64(<1 x i64>, <1 x i64>) +declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>) +declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>) + +declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>) +declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>) +declare <32 x i8> @llvm.umin.v32i8(<32 x i8>, <32 x i8>) +declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>) +declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>) +declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>) +declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>) +declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>) +declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>) +declare <1 x i64> @llvm.umin.v1i64(<1 x i64>, <1 x i64>) +declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>) +declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>) + +declare <8 x i8> @llvm.umax.v8i8(<8 x i8>, <8 x i8>) +declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>) +declare <32 x i8> @llvm.umax.v32i8(<32 x i8>, <32 x i8>) +declare <4 x i16> @llvm.umax.v4i16(<4 x i16>, <4 x i16>) +declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>) +declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>) +declare <2 x i32> @llvm.umax.v2i32(<2 
x i32>, <2 x i32>) +declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) +declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>) +declare <1 x i64> @llvm.umax.v1i64(<1 x i64>, <1 x i64>) +declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>) +declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll @@ -77,131 +77,41 @@ define void @smulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { ; CHECK-LABEL: smulh_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: adrp x8, .LCPI3_0 ; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: sunpklo z0.h, z2.b -; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 -; CHECK-NEXT: sunpklo z2.h, z2.b -; CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: sunpklo z4.h, z1.b +; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8 +; CHECK-NEXT: sunpklo z1.h, z1.b +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: sunpklo z5.h, z0.b +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: sunpklo z0.h, z0.b ; CHECK-NEXT: sunpklo z6.h, z3.b ; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8 ; CHECK-NEXT: sunpklo z3.h, z3.b -; CHECK-NEXT: sunpklo z1.h, z4.b -; CHECK-NEXT: ext z4.b, z4.b, z4.b, #8 -; CHECK-NEXT: sunpklo z4.h, z4.b -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: sunpklo z7.h, z5.b -; CHECK-NEXT: ext z5.b, z5.b, z5.b, #8 -; CHECK-NEXT: ldr q16, [x8, :lo12:.LCPI3_0] -; CHECK-NEXT: sunpklo z5.h, z5.b -; CHECK-NEXT: mul z3.h, p0/m, z3.h, z5.h -; CHECK-NEXT: movprfx z5, z6 +; CHECK-NEXT: sunpklo z7.h, z2.b +; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 +; CHECK-NEXT: sunpklo z2.h, z2.b +; CHECK-NEXT: mul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: movprfx z3, z4 +; CHECK-NEXT: mul z3.h, p0/m, z3.h, z6.h ; CHECK-NEXT: mul z5.h, p0/m, z5.h, z7.h -; CHECK-NEXT: mul z2.h, p0/m, z2.h, z4.h ; CHECK-NEXT: movprfx z4, z5 -; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z16.h -; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z16.h -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z5.h, z3.h[7] -; CHECK-NEXT: mov z6.h, z3.h[6] -; CHECK-NEXT: mov z7.h, z3.h[5] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w9, [sp, #16] -; CHECK-NEXT: strb w8, [sp, #24] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: fmov w9, s7 -; CHECK-NEXT: mov z17.h, z3.h[4] -; CHECK-NEXT: mov z18.h, z3.h[3] -; CHECK-NEXT: mov z19.h, z3.h[2] -; CHECK-NEXT: strb w10, [sp, #31] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #30] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, #29] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z3.h[1] -; CHECK-NEXT: mov z3.h, z4.h[7] -; CHECK-NEXT: mov z21.h, z4.h[6] -; CHECK-NEXT: strb w10, [sp, #28] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #27] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #26] -; CHECK-NEXT: fmov w9, s21 -; CHECK-NEXT: mov z22.h, z4.h[5] -; CHECK-NEXT: mov z23.h, z4.h[4] -; CHECK-NEXT: mov z24.h, z4.h[3] -; CHECK-NEXT: strb w10, [sp, #25] -; CHECK-NEXT: fmov w10, s22 -; CHECK-NEXT: strb w8, [sp, #23] -; CHECK-NEXT: fmov w8, s23 -; CHECK-NEXT: strb w9, [sp, #22] -; CHECK-NEXT: fmov w9, s24 -; CHECK-NEXT: mov z25.h, z4.h[2] -; CHECK-NEXT: mov z26.h, z4.h[1] -; 
CHECK-NEXT: strb w10, [sp, #21] -; CHECK-NEXT: fmov w10, s25 -; CHECK-NEXT: strb w8, [sp, #20] -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z16.h -; CHECK-NEXT: strb w9, [sp, #19] -; CHECK-NEXT: fmov w8, s26 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z16.h -; CHECK-NEXT: mov z2.h, z1.h[7] -; CHECK-NEXT: mov z3.h, z1.h[6] -; CHECK-NEXT: strb w10, [sp, #18] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strb w8, [sp, #17] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strb w9, [sp, #8] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z1.h[5] -; CHECK-NEXT: mov z5.h, z1.h[4] -; CHECK-NEXT: mov z6.h, z1.h[3] -; CHECK-NEXT: strb w10, [sp] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #15] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #14] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z1.h[2] -; CHECK-NEXT: mov z16.h, z1.h[1] -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: strb w10, [sp, #13] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #12] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #11] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z17.h, z0.h[6] -; CHECK-NEXT: mov z18.h, z0.h[5] -; CHECK-NEXT: mov z19.h, z0.h[4] -; CHECK-NEXT: strb w10, [sp, #10] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #9] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, #7] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z0.h[3] -; CHECK-NEXT: mov z21.h, z0.h[2] -; CHECK-NEXT: mov z22.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #6] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #5] -; CHECK-NEXT: fmov w8, s21 -; CHECK-NEXT: strb w9, [sp, #4] -; CHECK-NEXT: fmov w9, s22 -; CHECK-NEXT: strb w10, [sp, #3] -; CHECK-NEXT: strb w8, [sp, #2] -; CHECK-NEXT: strb w9, [sp, #1] -; CHECK-NEXT: ldp q0, q1, [sp] -; CHECK-NEXT: stp q0, q1, [x0] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z2.h +; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z2.h +; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z2.h +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: uzp1 z2.b, z3.b, z3.b +; CHECK-NEXT: uzp1 z3.b, z4.b, z4.b +; CHECK-NEXT: splice z2.b, p0, z2.b, z1.b +; CHECK-NEXT: splice z3.b, p0, z3.b, z0.b +; CHECK-NEXT: stp q2, q3, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b @@ -516,131 +426,41 @@ define void @umulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 { ; CHECK-LABEL: umulh_v32i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: ldp q2, q3, [x0] +; CHECK-NEXT: ldp q1, q0, [x0] ; CHECK-NEXT: adrp x8, .LCPI17_0 ; CHECK-NEXT: ptrue p0.h, vl8 -; CHECK-NEXT: uunpklo z0.h, z2.b -; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 -; CHECK-NEXT: uunpklo z2.h, z2.b -; CHECK-NEXT: ldp q4, q5, [x1] +; CHECK-NEXT: uunpklo z4.h, z1.b +; CHECK-NEXT: ext z1.b, z1.b, z1.b, #8 +; CHECK-NEXT: uunpklo z1.h, z1.b +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: uunpklo z5.h, z0.b +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: uunpklo z6.h, z3.b ; CHECK-NEXT: ext z3.b, z3.b, z3.b, #8 ; CHECK-NEXT: uunpklo z3.h, z3.b -; CHECK-NEXT: uunpklo z1.h, z4.b -; CHECK-NEXT: ext z4.b, z4.b, z4.b, #8 -; CHECK-NEXT: uunpklo z4.h, z4.b -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: uunpklo z7.h, z5.b -; CHECK-NEXT: ext z5.b, z5.b, z5.b, #8 -; CHECK-NEXT: ldr q16, [x8, :lo12:.LCPI17_0] -; CHECK-NEXT: uunpklo 
z5.h, z5.b -; CHECK-NEXT: mul z3.h, p0/m, z3.h, z5.h -; CHECK-NEXT: movprfx z5, z6 +; CHECK-NEXT: uunpklo z7.h, z2.b +; CHECK-NEXT: ext z2.b, z2.b, z2.b, #8 +; CHECK-NEXT: uunpklo z2.h, z2.b +; CHECK-NEXT: mul z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: movprfx z3, z4 +; CHECK-NEXT: mul z3.h, p0/m, z3.h, z6.h ; CHECK-NEXT: mul z5.h, p0/m, z5.h, z7.h -; CHECK-NEXT: mul z2.h, p0/m, z2.h, z4.h ; CHECK-NEXT: movprfx z4, z5 -; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z16.h -; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z16.h -; CHECK-NEXT: fmov w9, s4 -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: mov z5.h, z3.h[7] -; CHECK-NEXT: mov z6.h, z3.h[6] -; CHECK-NEXT: mov z7.h, z3.h[5] -; CHECK-NEXT: fmov w10, s5 -; CHECK-NEXT: strb w9, [sp, #16] -; CHECK-NEXT: strb w8, [sp, #24] -; CHECK-NEXT: fmov w8, s6 -; CHECK-NEXT: fmov w9, s7 -; CHECK-NEXT: mov z17.h, z3.h[4] -; CHECK-NEXT: mov z18.h, z3.h[3] -; CHECK-NEXT: mov z19.h, z3.h[2] -; CHECK-NEXT: strb w10, [sp, #31] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #30] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, #29] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z3.h[1] -; CHECK-NEXT: mov z3.h, z4.h[7] -; CHECK-NEXT: mov z21.h, z4.h[6] -; CHECK-NEXT: strb w10, [sp, #28] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #27] -; CHECK-NEXT: fmov w8, s3 -; CHECK-NEXT: strb w9, [sp, #26] -; CHECK-NEXT: fmov w9, s21 -; CHECK-NEXT: mov z22.h, z4.h[5] -; CHECK-NEXT: mov z23.h, z4.h[4] -; CHECK-NEXT: mov z24.h, z4.h[3] -; CHECK-NEXT: strb w10, [sp, #25] -; CHECK-NEXT: fmov w10, s22 -; CHECK-NEXT: strb w8, [sp, #23] -; CHECK-NEXT: fmov w8, s23 -; CHECK-NEXT: strb w9, [sp, #22] -; CHECK-NEXT: fmov w9, s24 -; CHECK-NEXT: mov z25.h, z4.h[2] -; CHECK-NEXT: mov z26.h, z4.h[1] -; CHECK-NEXT: strb w10, [sp, #21] -; CHECK-NEXT: fmov w10, s25 -; CHECK-NEXT: strb w8, [sp, #20] -; CHECK-NEXT: movprfx z1, z2 -; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z16.h -; CHECK-NEXT: strb w9, [sp, #19] -; CHECK-NEXT: fmov w8, s26 -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z16.h -; CHECK-NEXT: mov z2.h, z1.h[7] -; CHECK-NEXT: mov z3.h, z1.h[6] -; CHECK-NEXT: strb w10, [sp, #18] -; CHECK-NEXT: fmov w10, s0 -; CHECK-NEXT: strb w8, [sp, #17] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strb w9, [sp, #8] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z1.h[5] -; CHECK-NEXT: mov z5.h, z1.h[4] -; CHECK-NEXT: mov z6.h, z1.h[3] -; CHECK-NEXT: strb w10, [sp] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #15] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #14] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z1.h[2] -; CHECK-NEXT: mov z16.h, z1.h[1] -; CHECK-NEXT: mov z1.h, z0.h[7] -; CHECK-NEXT: strb w10, [sp, #13] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #12] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #11] -; CHECK-NEXT: fmov w9, s1 -; CHECK-NEXT: mov z17.h, z0.h[6] -; CHECK-NEXT: mov z18.h, z0.h[5] -; CHECK-NEXT: mov z19.h, z0.h[4] -; CHECK-NEXT: strb w10, [sp, #10] -; CHECK-NEXT: fmov w10, s17 -; CHECK-NEXT: strb w8, [sp, #9] -; CHECK-NEXT: fmov w8, s18 -; CHECK-NEXT: strb w9, [sp, #7] -; CHECK-NEXT: fmov w9, s19 -; CHECK-NEXT: mov z20.h, z0.h[3] -; CHECK-NEXT: mov z21.h, z0.h[2] -; CHECK-NEXT: mov z22.h, z0.h[1] -; CHECK-NEXT: strb w10, [sp, #6] -; CHECK-NEXT: fmov w10, s20 -; CHECK-NEXT: strb w8, [sp, #5] -; CHECK-NEXT: fmov w8, s21 -; CHECK-NEXT: strb w9, [sp, #4] -; CHECK-NEXT: fmov w9, s22 -; CHECK-NEXT: strb w10, [sp, 
#3] -; CHECK-NEXT: strb w8, [sp, #2] -; CHECK-NEXT: strb w9, [sp, #1] -; CHECK-NEXT: ldp q0, q1, [sp] -; CHECK-NEXT: stp q0, q1, [x0] -; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: lsr z4.h, p0/m, z4.h, z2.h +; CHECK-NEXT: lsr z3.h, p0/m, z3.h, z2.h +; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z2.h +; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: uzp1 z2.b, z3.b, z3.b +; CHECK-NEXT: uzp1 z3.b, z4.b, z4.b +; CHECK-NEXT: splice z2.b, p0, z2.b, z1.b +; CHECK-NEXT: splice z3.b, p0, z3.b, z0.b +; CHECK-NEXT: stp q2, q3, [x0] ; CHECK-NEXT: ret %op1 = load <32 x i8>, <32 x i8>* %a %op2 = load <32 x i8>, <32 x i8>* %b diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll @@ -0,0 +1,814 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; UADDV +; + +define i8 @uaddv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: uaddv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uaddv d0, p0, z0.b +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @uaddv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: uaddv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: uaddv d0, p0, z0.b +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %a) + ret i8 %res +} + +define i8 @uaddv_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: uaddv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: add z0.b, z1.b, z0.b +; CHECK-NEXT: uaddv d0, p0, z0.b +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @uaddv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: uaddv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uaddv d0, p0, z0.h +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @uaddv_v8i16(<8 x i16> %a) #0 { +; CHECK-LABEL: uaddv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: uaddv d0, p0, z0.h +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @uaddv_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: uaddv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: add z0.h, z1.h, z0.h +; CHECK-NEXT: uaddv d0, p0, z0.h +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + 
%op = load <16 x i16>, <16 x i16>* %a + %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @uaddv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: uaddv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uaddv d0, p0, z0.s +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @uaddv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: uaddv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: uaddv d0, p0, z0.s +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a) + ret i32 %res +} + +define i32 @uaddv_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: uaddv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: add z0.s, z1.s, z0.s +; CHECK-NEXT: uaddv d0, p0, z0.s +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %op) + ret i32 %res +} + +define i64 @uaddv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: uaddv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: uaddv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @uaddv_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: uaddv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: add z0.d, z1.d, z0.d +; CHECK-NEXT: uaddv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %op) + ret i64 %res +} + +; +; SMAXV +; + +define i8 @smaxv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: smaxv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: smaxv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @smaxv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: smaxv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: smaxv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %a) + ret i8 %res +} + +define i8 @smaxv_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: smaxv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: smaxv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @smaxv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: smaxv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: smaxv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @smaxv_v8i16(<8 x i16> %a) #0 { +; 
CHECK-LABEL: smaxv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: smaxv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @smaxv_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: smaxv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: smaxv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @smaxv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: smaxv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: smaxv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @smaxv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: smaxv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: smaxv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a) + ret i32 %res +} + +define i32 @smaxv_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: smaxv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: smaxv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %op) + ret i32 %res +} + +; No NEON 64-bit vector SMAXV support. Use SVE. 
+define i64 @smaxv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: smaxv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: smaxv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @smaxv_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: smaxv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: smaxv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %op) + ret i64 %res +} + +; +; SMINV +; + +define i8 @sminv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: sminv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: sminv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @sminv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: sminv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: sminv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %a) + ret i8 %res +} + +define i8 @sminv_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: sminv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: sminv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @sminv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: sminv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: sminv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @sminv_v8i16(<8 x i16> %a) #0 { +; CHECK-LABEL: sminv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: sminv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @sminv_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: sminv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: sminv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @sminv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: sminv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: sminv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @sminv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: sminv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: sminv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.smin.v4i32(<4 x 
i32> %a) + ret i32 %res +} + +define i32 @sminv_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: sminv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: sminv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %op) + ret i32 %res +} + +; No NEON 64-bit vector SMINV support. Use SVE. +define i64 @sminv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: sminv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: sminv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @sminv_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: sminv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: sminv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %op) + ret i64 %res +} + +; +; UMAXV +; + +define i8 @umaxv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: umaxv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: umaxv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @umaxv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: umaxv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: umaxv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %a) + ret i8 %res +} + +define i8 @umaxv_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: umaxv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: umaxv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @umaxv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: umaxv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: umaxv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @umaxv_v8i16(<8 x i16> %a) #0 { +; CHECK-LABEL: umaxv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: umaxv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @umaxv_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: umaxv_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: umaxv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @umaxv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: umaxv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue 
p0.s, vl2 +; CHECK-NEXT: umaxv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @umaxv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: umaxv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: umaxv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a) + ret i32 %res +} + +define i32 @umaxv_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: umaxv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: umaxv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %op) + ret i32 %res +} + +; No NEON 64-bit vector UMAXV support. Use SVE. +define i64 @umaxv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: umaxv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: umaxv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @umaxv_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: umaxv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: umaxv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %op) + ret i64 %res +} + +; +; UMINV +; + +define i8 @uminv_v8i8(<8 x i8> %a) #0 { +; CHECK-LABEL: uminv_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uminv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %a) + ret i8 %res +} + +define i8 @uminv_v16i8(<16 x i8> %a) #0 { +; CHECK-LABEL: uminv_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: uminv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %a) + ret i8 %res +} + +define i8 @uminv_v32i8(<32 x i8>* %a) #0 { +; CHECK-LABEL: uminv_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: uminv b0, p0, z0.b +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <32 x i8>, <32 x i8>* %a + %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %op) + ret i8 %res +} + +define i16 @uminv_v4i16(<4 x i16> %a) #0 { +; CHECK-LABEL: uminv_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uminv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %a) + ret i16 %res +} + +define i16 @uminv_v8i16(<8 x i16> %a) #0 { +; CHECK-LABEL: uminv_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: uminv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %a) + ret i16 %res +} + +define i16 @uminv_v16i16(<16 x i16>* %a) #0 { +; CHECK-LABEL: uminv_v16i16: +; CHECK: // 
%bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: uminv h0, p0, z0.h +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <16 x i16>, <16 x i16>* %a + %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %op) + ret i16 %res +} + +define i32 @uminv_v2i32(<2 x i32> %a) #0 { +; CHECK-LABEL: uminv_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uminv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %a) + ret i32 %res +} + +define i32 @uminv_v4i32(<4 x i32> %a) #0 { +; CHECK-LABEL: uminv_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: uminv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %res = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a) + ret i32 %res +} + +define i32 @uminv_v8i32(<8 x i32>* %a) #0 { +; CHECK-LABEL: uminv_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: uminv s0, p0, z0.s +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: ret + %op = load <8 x i32>, <8 x i32>* %a + %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %op) + ret i32 %res +} + +; No NEON 64-bit vector UMINV support. Use SVE. +define i64 @uminv_v2i64(<2 x i64> %a) #0 { +; CHECK-LABEL: uminv_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: uminv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %res = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %a) + ret i64 %res +} + +define i64 @uminv_v4i64(<4 x i64>* %a) #0 { +; CHECK-LABEL: uminv_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: uminv d0, p0, z0.d +; CHECK-NEXT: fmov x0, d0 +; CHECK-NEXT: ret + %op = load <4 x i64>, <4 x i64>* %a + %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %op) + ret i64 %res +} + +attributes #0 = { "target-features"="+sve" } + +declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) +declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) + +declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>) +declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>) + +declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>) 
+declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>) + +declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>) +declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>) + +declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>) +declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>) +declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>) + +declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>) +declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>) + +declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>) +declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>) +declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>) + +declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>) +declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>) diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll --- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll @@ -10,36 +10,22 @@ define <4 x i8> @srem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: srem_v4i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI0_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.h, vl4 ; CHECK-NEXT: ptrue p1.s, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI0_0] -; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z2.h -; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: asr z1.h, p0/m, z1.h, z2.h +; CHECK-NEXT: asr z0.h, p0/m, z0.h, z2.h ; CHECK-NEXT: sunpklo z2.s, z1.h ; CHECK-NEXT: sunpklo z3.s, z0.h ; CHECK-NEXT: sdivr z2.s, p1/m, z2.s, z3.s -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = srem <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -48,8 +34,6 @@ define <8 x i8> @srem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { ; CHECK-LABEL: srem_v8i8: ; CHECK: // %bb.0: -; 
CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: sunpklo z2.h, z1.b @@ -63,33 +47,9 @@ ; CHECK-NEXT: sdivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.b, vl8 ; CHECK-NEXT: uzp1 z2.h, z2.h, z4.h -; CHECK-NEXT: mov z3.h, z2.h[7] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z2.h[6] -; CHECK-NEXT: mov z5.h, z2.h[5] -; CHECK-NEXT: mov z6.h, z2.h[4] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z2.h[3] -; CHECK-NEXT: mov z16.h, z2.h[2] -; CHECK-NEXT: mov z2.h, z2.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b ; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = srem <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -187,8 +147,6 @@ define <4 x i16> @srem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: srem_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -196,21 +154,9 @@ ; CHECK-NEXT: sunpklo z3.s, z0.h ; CHECK-NEXT: sdivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.h, vl4 -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = srem <4 x i16> %op1, %op2 ret <4 x i16> %res @@ -379,34 +325,20 @@ define <4 x i8> @urem_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 { ; CHECK-LABEL: urem_v4i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: adrp x8, .LCPI13_0 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 ; CHECK-NEXT: ldr d2, [x8, :lo12:.LCPI13_0] -; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: and z1.d, z1.d, z2.d +; CHECK-NEXT: and z0.d, z0.d, z2.d ; CHECK-NEXT: uunpklo z2.s, z1.h ; CHECK-NEXT: uunpklo z3.s, z0.h ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.h, vl4 -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add 
sp, sp, #16 ; CHECK-NEXT: ret %res = urem <4 x i8> %op1, %op2 ret <4 x i8> %res @@ -415,8 +347,6 @@ define <8 x i8> @urem_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 { ; CHECK-LABEL: urem_v8i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: uunpklo z2.h, z1.b @@ -430,33 +360,9 @@ ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.b, vl8 ; CHECK-NEXT: uzp1 z2.h, z2.h, z4.h -; CHECK-NEXT: mov z3.h, z2.h[7] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: mov z4.h, z2.h[6] -; CHECK-NEXT: mov z5.h, z2.h[5] -; CHECK-NEXT: mov z6.h, z2.h[4] -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strb w8, [sp, #8] -; CHECK-NEXT: fmov w8, s5 -; CHECK-NEXT: strb w9, [sp, #15] -; CHECK-NEXT: fmov w9, s6 -; CHECK-NEXT: mov z7.h, z2.h[3] -; CHECK-NEXT: mov z16.h, z2.h[2] -; CHECK-NEXT: mov z2.h, z2.h[1] -; CHECK-NEXT: strb w10, [sp, #14] -; CHECK-NEXT: fmov w10, s7 -; CHECK-NEXT: strb w8, [sp, #13] -; CHECK-NEXT: fmov w8, s16 -; CHECK-NEXT: strb w9, [sp, #12] -; CHECK-NEXT: fmov w9, s2 -; CHECK-NEXT: strb w10, [sp, #11] -; CHECK-NEXT: strb w8, [sp, #10] -; CHECK-NEXT: strb w9, [sp, #9] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b ; CHECK-NEXT: mls z0.b, p0/m, z2.b, z1.b ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = urem <8 x i8> %op1, %op2 ret <8 x i8> %res @@ -554,8 +460,6 @@ define <4 x i16> @urem_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 { ; CHECK-LABEL: urem_v4i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sub sp, sp, #16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: // kill: def $d1 killed $d1 def $z1 ; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0 ; CHECK-NEXT: ptrue p0.s, vl4 @@ -563,21 +467,9 @@ ; CHECK-NEXT: uunpklo z3.s, z0.h ; CHECK-NEXT: udivr z2.s, p0/m, z2.s, z3.s ; CHECK-NEXT: ptrue p0.h, vl4 -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: mov z3.s, z2.s[3] -; CHECK-NEXT: mov z4.s, z2.s[2] -; CHECK-NEXT: mov z2.s, z2.s[1] -; CHECK-NEXT: fmov w9, s3 -; CHECK-NEXT: fmov w10, s4 -; CHECK-NEXT: strh w8, [sp, #8] -; CHECK-NEXT: fmov w8, s2 -; CHECK-NEXT: strh w9, [sp, #14] -; CHECK-NEXT: strh w10, [sp, #12] -; CHECK-NEXT: strh w8, [sp, #10] -; CHECK-NEXT: ldr d2, [sp, #8] +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h ; CHECK-NEXT: mls z0.h, p0/m, z2.h, z1.h ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0 -; CHECK-NEXT: add sp, sp, #16 ; CHECK-NEXT: ret %res = urem <4 x i16> %op1, %op2 ret <4 x i16> %res diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-mask-opt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-mask-opt.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-mask-opt.ll @@ -0,0 +1,591 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; LD1B +; + +define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: 
ld1b { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: strh w10, [sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: st1b { z0.h }, p0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = load <4 x i8*>, <4 x i8*>* %b + %vals = call <4 x i8> @llvm.masked.gather.v4i8(<4 x i8*> %ptrs, i32 8, <4 x i1> , <4 x i8> undef) + store <4 x i8> %vals, <4 x i8>* %a + ret void +} + +define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldp q1, q0, [x1, #32] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: ld1b { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strb w8, [sp, #12] +; CHECK-NEXT: ld1b { z3.d }, p0/z, [z3.d] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.s, z3.s[1] +; CHECK-NEXT: strb w8, [sp, #10] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ld1b { z2.d }, p0/z, [z2.d] +; CHECK-NEXT: strb w9, [sp, #13] +; CHECK-NEXT: strb w8, [sp, #15] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: uzp1 z0.s, z2.s, z2.s +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strb w8, [sp, #11] +; CHECK-NEXT: strb w10, [sp, #8] +; CHECK-NEXT: strb w9, [sp, #9] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = load <8 x i8*>, <8 x i8*>* %b + %vals = call <8 x i8> @llvm.masked.gather.v8i8(<8 x i8*> %ptrs, i32 8, <8 x i1> , <8 x i8> undef) + store <8 x i8> %vals, <8 x i8>* %a + ret void +} + +define void @masked_gather_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 { +; CHECK-LABEL: masked_gather_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldp q2, q3, [x1, #96] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ldp q4, q5, [x1, #32] +; CHECK-NEXT: ldp q6, q7, [x1, #64] +; CHECK-NEXT: ld1b { z3.d }, p0/z, [z3.d] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.s, z3.s[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: ld1b { z2.d }, p0/z, [z2.d] +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z2.s[1] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: strb w8, [sp, #12] +; CHECK-NEXT: ld1b { z7.d }, p0/z, [z7.d] +; CHECK-NEXT: uzp1 z7.s, z7.s, z7.s +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: mov z7.s, z7.s[1] +; CHECK-NEXT: strb w8, [sp, #10] +; CHECK-NEXT: ld1b { z6.d }, p0/z, [z6.d] +; CHECK-NEXT: uzp1 z6.s, z6.s, z6.s +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: mov z6.s, z6.s[1] +; CHECK-NEXT: strb w8, [sp, #8] +; CHECK-NEXT: ld1b { z5.d }, p0/z, [z5.d] +; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: mov z5.s, z5.s[1] +; CHECK-NEXT: strb w8, [sp, #6] +; CHECK-NEXT: ld1b { z4.d }, p0/z, [z4.d] +; CHECK-NEXT: uzp1 z4.s, z4.s, z4.s +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: mov 
z4.s, z4.s[1] +; CHECK-NEXT: strb w8, [sp, #4] +; CHECK-NEXT: ld1b { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: strb w8, [sp, #2] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: strb w9, [sp, #15] +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: strb w10, [sp, #13] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: strb w8, [sp, #11] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: strb w9, [sp, #9] +; CHECK-NEXT: mov z2.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strb w10, [sp, #7] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strb w8, [sp, #5] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: strb w9, [sp] +; CHECK-NEXT: strb w10, [sp, #3] +; CHECK-NEXT: strb w8, [sp, #1] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = load <16 x i8*>, <16 x i8*>* %b + %vals = call <16 x i8> @llvm.masked.gather.v16i8(<16 x i8*> %ptrs, i32 8, <16 x i1> , <16 x i8> undef) + store <16 x i8> %vals, <16 x i8>* %a + ret void +} + +define void @masked_gather_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 { +; CHECK-LABEL: masked_gather_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q18, q19, [x1, #96] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q0, q1, [x1, #128] +; CHECK-NEXT: ldp q2, q3, [x1, #160] +; CHECK-NEXT: ldp q4, q5, [x1, #192] +; CHECK-NEXT: ldp q6, q7, [x1, #224] +; CHECK-NEXT: ldp q16, q17, [x1] +; CHECK-NEXT: ldp q20, q21, [x1, #32] +; CHECK-NEXT: ldp q22, q23, [x1, #64] +; CHECK-NEXT: ld1b { z19.d }, p0/z, [z19.d] +; CHECK-NEXT: uzp1 z19.s, z19.s, z19.s +; CHECK-NEXT: fmov w8, s19 +; CHECK-NEXT: mov z19.s, z19.s[1] +; CHECK-NEXT: fmov w9, s19 +; CHECK-NEXT: strb w8, [sp, #14] +; CHECK-NEXT: ld1b { z18.d }, p0/z, [z18.d] +; CHECK-NEXT: uzp1 z18.s, z18.s, z18.s +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z18.s, z18.s[1] +; CHECK-NEXT: fmov w10, s18 +; CHECK-NEXT: strb w8, [sp, #12] +; CHECK-NEXT: ld1b { z23.d }, p0/z, [z23.d] +; CHECK-NEXT: uzp1 z23.s, z23.s, z23.s +; CHECK-NEXT: fmov w8, s23 +; CHECK-NEXT: mov z23.s, z23.s[1] +; CHECK-NEXT: strb w8, [sp, #10] +; CHECK-NEXT: ld1b { z22.d }, p0/z, [z22.d] +; CHECK-NEXT: uzp1 z22.s, z22.s, z22.s +; CHECK-NEXT: fmov w8, s22 +; CHECK-NEXT: mov z22.s, z22.s[1] +; CHECK-NEXT: strb w8, [sp, #8] +; CHECK-NEXT: ld1b { z21.d }, p0/z, [z21.d] +; CHECK-NEXT: uzp1 z21.s, z21.s, z21.s +; CHECK-NEXT: fmov w8, s21 +; CHECK-NEXT: mov z21.s, z21.s[1] +; CHECK-NEXT: strb w8, [sp, #6] +; CHECK-NEXT: ld1b { z20.d }, p0/z, [z20.d] +; CHECK-NEXT: uzp1 z20.s, z20.s, z20.s +; CHECK-NEXT: fmov w8, s20 +; CHECK-NEXT: mov z20.s, z20.s[1] +; CHECK-NEXT: strb w8, [sp, #4] +; CHECK-NEXT: ld1b { z17.d }, p0/z, [z17.d] +; CHECK-NEXT: uzp1 z17.s, z17.s, z17.s +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: mov z17.s, z17.s[1] +; CHECK-NEXT: strb w8, [sp, #2] +; CHECK-NEXT: fmov w8, s23 +; CHECK-NEXT: ld1b { z16.d }, p0/z, [z16.d] +; CHECK-NEXT: strb w9, [sp, #15] +; CHECK-NEXT: fmov w9, s22 +; CHECK-NEXT: strb w10, [sp, #13] +; CHECK-NEXT: fmov w10, s21 +; CHECK-NEXT: strb w8, [sp, #11] +; CHECK-NEXT: fmov w8, s20 +; CHECK-NEXT: uzp1 z16.s, z16.s, z16.s +; CHECK-NEXT: strb w9, [sp, #9] +; CHECK-NEXT: fmov w9, s17 +; CHECK-NEXT: mov z17.s, z16.s[1] +; CHECK-NEXT: strb w10, [sp, #7] +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: strb w8, [sp, #5] +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: strb w9, [sp, #3] +; 
CHECK-NEXT: strb w10, [sp] +; CHECK-NEXT: strb w8, [sp, #1] +; CHECK-NEXT: ld1b { z7.d }, p0/z, [z7.d] +; CHECK-NEXT: uzp1 z7.s, z7.s, z7.s +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: mov z7.s, z7.s[1] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: strb w8, [sp, #30] +; CHECK-NEXT: ld1b { z6.d }, p0/z, [z6.d] +; CHECK-NEXT: uzp1 z6.s, z6.s, z6.s +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: mov z6.s, z6.s[1] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: strb w8, [sp, #28] +; CHECK-NEXT: ld1b { z5.d }, p0/z, [z5.d] +; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: mov z5.s, z5.s[1] +; CHECK-NEXT: strb w8, [sp, #26] +; CHECK-NEXT: ld1b { z4.d }, p0/z, [z4.d] +; CHECK-NEXT: uzp1 z4.s, z4.s, z4.s +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: mov z4.s, z4.s[1] +; CHECK-NEXT: strb w8, [sp, #24] +; CHECK-NEXT: ld1b { z3.d }, p0/z, [z3.d] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.s, z3.s[1] +; CHECK-NEXT: strb w8, [sp, #22] +; CHECK-NEXT: ld1b { z2.d }, p0/z, [z2.d] +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z2.s[1] +; CHECK-NEXT: strb w8, [sp, #20] +; CHECK-NEXT: ld1b { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: strb w8, [sp, #18] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: ld1b { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: strb w9, [sp, #31] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strb w10, [sp, #29] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strb w8, [sp, #27] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: strb w9, [sp, #25] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: strb w10, [sp, #23] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: strb w8, [sp, #21] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: strb w9, [sp, #19] +; CHECK-NEXT: strb w10, [sp, #16] +; CHECK-NEXT: strb w8, [sp, #17] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %ptrs = load <32 x i8*>, <32 x i8*>* %b + %vals = call <32 x i8> @llvm.masked.gather.v32i8(<32 x i8*> %ptrs, i32 8, <32 x i1> , <32 x i8> undef) + store <32 x i8> %vals, <32 x i8>* %a + ret void +} + +; +; LD1H +; + +define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: st1h { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %ptrs = load <2 x i16*>, <2 x i16*>* %b + %vals = call <2 x i16> @llvm.masked.gather.v2i16(<2 x i16*> %ptrs, i32 8, <2 x i1> , <2 x i16> undef) + store <2 x i16> %vals, <2 x i16>* %a + ret void +} + +define void @masked_gather_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: ld1h { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: strh w9, [sp, #14] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: strh w10, 
[sp, #10] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = load <4 x i16*>, <4 x i16*>* %b + %vals = call <4 x i16> @llvm.masked.gather.v4i16(<4 x i16*> %ptrs, i32 8, <4 x i1> , <4 x i16> undef) + store <4 x i16> %vals, <4 x i16>* %a + ret void +} + +define void @masked_gather_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldp q1, q0, [x1, #32] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1] +; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: ld1h { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: ld1h { z3.d }, p0/z, [z3.d] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.s, z3.s[1] +; CHECK-NEXT: strh w8, [sp, #4] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: ld1h { z2.d }, p0/z, [z2.d] +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: uzp1 z0.s, z2.s, z2.s +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w8, [sp, #6] +; CHECK-NEXT: strh w10, [sp] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: ldr q0, [sp] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %ptrs = load <8 x i16*>, <8 x i16*>* %b + %vals = call <8 x i16> @llvm.masked.gather.v8i16(<8 x i16*> %ptrs, i32 8, <8 x i1> , <8 x i16> undef) + store <8 x i16> %vals, <8 x i16>* %a + ret void +} + +define void @masked_gather_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: ldp q2, q3, [x1, #32] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q0, q1, [x1, #64] +; CHECK-NEXT: ldp q4, q5, [x1, #96] +; CHECK-NEXT: ldp q6, q7, [x1] +; CHECK-NEXT: ld1h { z3.d }, p0/z, [z3.d] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.s, z3.s[1] +; CHECK-NEXT: strh w8, [sp, #12] +; CHECK-NEXT: ld1h { z2.d }, p0/z, [z2.d] +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z2.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w8, [sp, #8] +; CHECK-NEXT: ld1h { z7.d }, p0/z, [z7.d] +; CHECK-NEXT: uzp1 z7.s, z7.s, z7.s +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: strh w8, [sp, #4] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: ld1h { z6.d }, p0/z, [z6.d] +; CHECK-NEXT: mov z3.s, z7.s[1] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: strh w9, [sp, #10] +; CHECK-NEXT: strh w8, [sp, #14] +; CHECK-NEXT: uzp1 z2.s, z6.s, z6.s +; CHECK-NEXT: strh w10, [sp, #6] +; CHECK-NEXT: mov z3.s, z2.s[1] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strh w8, [sp] +; CHECK-NEXT: strh w9, [sp, #2] +; CHECK-NEXT: ld1h { z2.d }, p0/z, [z5.d] +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.s, z2.s[1] +; CHECK-NEXT: strh w8, [sp, #28] +; CHECK-NEXT: ld1h { z3.d }, p0/z, [z4.d] +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: strh w8, [sp, #24] +; CHECK-NEXT: ld1h { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: uzp1 z1.s, z1.s, 
z1.s +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.s, z1.s[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: strh w8, [sp, #20] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ld1h { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: mov z2.s, z3.s[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w10, [sp, #22] +; CHECK-NEXT: strh w8, [sp, #30] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: strh w9, [sp, #26] +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: strh w8, [sp, #16] +; CHECK-NEXT: strh w9, [sp, #18] +; CHECK-NEXT: ldp q1, q0, [sp] +; CHECK-NEXT: stp q1, q0, [x0] +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ret + %ptrs = load <16 x i16*>, <16 x i16*>* %b + %vals = call <16 x i16> @llvm.masked.gather.v16i16(<16 x i16*> %ptrs, i32 8, <16 x i1> , <16 x i16> undef) + store <16 x i16> %vals, <16 x i16>* %a + ret void +} + +; +; LD1W +; + +define void @masked_gather_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %ptrs = load <2 x i32*>, <2 x i32*>* %b + %vals = call <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*> %ptrs, i32 8, <2 x i1> , <2 x i32> undef) + store <2 x i32> %vals, <2 x i32>* %a + ret void +} + +define void @masked_gather_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q1, q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: ld1w { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: splice z1.s, p0, z1.s, z0.s +; CHECK-NEXT: str q1, [x0] +; CHECK-NEXT: ret + %ptrs = load <4 x i32*>, <4 x i32*>* %b + %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 8, <4 x i1> , <4 x i32> undef) + store <4 x i32> %vals, <4 x i32>* %a + ret void +} + +define void @masked_gather_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q3, [x1, #32] +; CHECK-NEXT: ld1w { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: ld1w { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: ld1w { z3.d }, p0/z, [z3.d] +; CHECK-NEXT: ld1w { z2.d }, p0/z, [z2.d] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: splice z2.s, p0, z2.s, z3.s +; CHECK-NEXT: stp q0, q2, [x0] +; CHECK-NEXT: ret + %ptrs = load <8 x i32*>, <8 x i32*>* %b + %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptrs, i32 8, <8 x i1> , <8 x i32> undef) + store <8 x i32> %vals, <8 x i32>* %a + ret void +} + +; +; LD1D +; + +define void @masked_gather_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr q0, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1d { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: ret + %ptrs = load <2 x i64*>, <2 x i64*>* %b + %vals = call <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %ptrs, i32 8, <2 x i1> , <2 x i64> undef) + store <2 x i64> %vals, <2 x i64>* %a + ret void +} + +define void @masked_gather_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 
{ +; CHECK-LABEL: masked_gather_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x1] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ld1d { z1.d }, p0/z, [z1.d] +; CHECK-NEXT: ld1d { z0.d }, p0/z, [z0.d] +; CHECK-NEXT: stp q0, q1, [x0] +; CHECK-NEXT: ret + %ptrs = load <4 x i64*>, <4 x i64*>* %b + %vals = call <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*> %ptrs, i32 8, <4 x i1> , <4 x i64> undef) + store <4 x i64> %vals, <4 x i64>* %a + ret void +} + +declare <4 x i8> @llvm.masked.gather.v4i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>) +declare <8 x i8> @llvm.masked.gather.v8i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>) +declare <16 x i8> @llvm.masked.gather.v16i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>) +declare <32 x i8> @llvm.masked.gather.v32i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>) + +declare <2 x i16> @llvm.masked.gather.v2i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>) +declare <4 x i16> @llvm.masked.gather.v4i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>) +declare <8 x i16> @llvm.masked.gather.v8i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>) +declare <16 x i16> @llvm.masked.gather.v16i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>) + +declare <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>) +declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>) +declare <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>) + +declare <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*>, i32, <2 x i1>, <2 x i64>) +declare <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*>, i32, <4 x i1>, <4 x i64>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-gather.ll @@ -0,0 +1,2138 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @masked_gather_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbz w10, #0, .LBB0_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: fmov s0, w9 +; CHECK-NEXT: .LBB0_2: // %else +; CHECK-NEXT: index z1.h, #0, #1 +; CHECK-NEXT: ptrue p1.h +; CHECK-NEXT: tbnz w8, #1, .LBB0_7 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB0_8 +; CHECK-NEXT: .LBB0_4: // %else5 +; CHECK-NEXT: tbz w8, #3, .LBB0_6 +; CHECK-NEXT: .LBB0_5: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #3 +; CHECK-NEXT: fmov x8, d2 +; 
CHECK-NEXT: mov z2.h, w9 +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: cmpeq p1.h, p1/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w8 +; CHECK-NEXT: .LBB0_6: // %else8 +; CHECK-NEXT: st1b { z0.h }, p0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB0_7: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: cmpeq p2.h, p1/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p2/m, w9 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB0_4 +; CHECK-NEXT: .LBB0_8: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: cmpeq p2.h, p1/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p2/m, w9 +; CHECK-NEXT: tbnz w8, #3, .LBB0_5 +; CHECK-NEXT: b .LBB0_6 + %cval = load <4 x i8>, <4 x i8>* %a + %ptrs = load <4 x i8*>, <4 x i8*>* %b + %mask = icmp eq <4 x i8> %cval, zeroinitializer + %vals = call <4 x i8> @llvm.masked.gather.v4i8(<4 x i8*> %ptrs, i32 8, <4 x i1> %mask, <4 x i8> undef) + store <4 x i8> %vals, <4 x i8>* %a + ret void +} + +define void @masked_gather_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI1_0] +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z1.b, z0.b[6] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z6.b, z0.b[7] +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: and w8, w11, #0x1 +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: tbz w9, #0, .LBB1_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr b0, [x9] +; CHECK-NEXT: .LBB1_2: // %else +; CHECK-NEXT: index z1.b, #0, #1 +; CHECK-NEXT: ptrue p0.b +; CHECK-NEXT: tbnz w8, #1, .LBB1_11 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB1_12 +; CHECK-NEXT: .LBB1_4: // %else5 +; CHECK-NEXT: tbnz w8, #3, .LBB1_13 +; CHECK-NEXT: .LBB1_5: // %else8 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbnz w8, #4, .LBB1_14 +; CHECK-NEXT: .LBB1_6: // %else11 +; CHECK-NEXT: tbnz w8, #5, .LBB1_15 +; CHECK-NEXT: .LBB1_7: // %else14 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbnz w8, #6, .LBB1_16 +; CHECK-NEXT: .LBB1_8: // %else17 +; CHECK-NEXT: tbz w8, #7, .LBB1_10 +; CHECK-NEXT: .LBB1_9: // %cond.load19 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #7 +; CHECK-NEXT: fmov x8, d2 +; CHECK-NEXT: mov z2.b, w9 +; CHECK-NEXT: ldrb w8, [x8] +; CHECK-NEXT: cmpeq p0.b, p0/z, z1.b, z2.b +; CHECK-NEXT: mov z0.b, p0/m, w8 +; CHECK-NEXT: .LBB1_10: // %else20 +; CHECK-NEXT: 
str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB1_11: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.b, w10 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z2.b +; CHECK-NEXT: mov z0.b, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB1_4 +; CHECK-NEXT: .LBB1_12: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: mov z3.b, w10 +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z0.b, p1/m, w9 +; CHECK-NEXT: tbz w8, #3, .LBB1_5 +; CHECK-NEXT: .LBB1_13: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.b, w10 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z2.b +; CHECK-NEXT: mov z0.b, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbz w8, #4, .LBB1_6 +; CHECK-NEXT: .LBB1_14: // %cond.load10 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #4 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: mov z3.b, w10 +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z0.b, p1/m, w9 +; CHECK-NEXT: tbz w8, #5, .LBB1_7 +; CHECK-NEXT: .LBB1_15: // %cond.load13 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #5 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.b, w10 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z2.b +; CHECK-NEXT: mov z0.b, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbz w8, #6, .LBB1_8 +; CHECK-NEXT: .LBB1_16: // %cond.load16 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #6 +; CHECK-NEXT: ldrb w9, [x9] +; CHECK-NEXT: mov z3.b, w10 +; CHECK-NEXT: cmpeq p1.b, p0/z, z1.b, z3.b +; CHECK-NEXT: mov z0.b, p1/m, w9 +; CHECK-NEXT: tbnz w8, #7, .LBB1_9 +; CHECK-NEXT: b .LBB1_10 + %cval = load <8 x i8>, <8 x i8>* %a + %ptrs = load <8 x i8*>, <8 x i8*>* %b + %mask = icmp eq <8 x i8> %cval, zeroinitializer + %vals = call <8 x i8> @llvm.masked.gather.v8i8(<8 x i8*> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> undef) + store <8 x i8> %vals, <8 x i8>* %a + ret void +} + +define void @masked_gather_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w9, [x0, #2] +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w9, [sp, #4] +; CHECK-NEXT: ldr d0, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: ldrh w8, [x0] +; CHECK-NEXT: str w8, [sp] +; CHECK-NEXT: ldr d1, [sp] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z0.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbz w9, #0, .LBB2_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: fmov s0, w9 +; CHECK-NEXT: .LBB2_2: // %else +; CHECK-NEXT: tbz w8, #1, .LBB2_4 +; CHECK-NEXT: // %bb.3: // %cond.load1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov w9, #1 +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p1.s +; CHECK-NEXT: mov z1.s, w9 +; CHECK-NEXT: ldrh w8, [x8] +; CHECK-NEXT: cmpeq p1.s, p1/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p1/m, w8 +; CHECK-NEXT: .LBB2_4: // %else2 +; CHECK-NEXT: st1h { z0.s }, p0, 
[x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <2 x i16>, <2 x i16>* %a + %ptrs = load <2 x i16*>, <2 x i16*>* %b + %mask = icmp eq <2 x i16> %cval, zeroinitializer + %vals = call <2 x i16> @llvm.masked.gather.v2i16(<2 x i16*> %ptrs, i32 8, <2 x i1> %mask, <2 x i16> undef) + store <2 x i16> %vals, <2 x i16>* %a + ret void +} + +define void @masked_gather_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbz w10, #0, .LBB3_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr h0, [x9] +; CHECK-NEXT: .LBB3_2: // %else +; CHECK-NEXT: index z1.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: tbnz w8, #1, .LBB3_7 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB3_8 +; CHECK-NEXT: .LBB3_4: // %else5 +; CHECK-NEXT: tbz w8, #3, .LBB3_6 +; CHECK-NEXT: .LBB3_5: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #3 +; CHECK-NEXT: fmov x8, d2 +; CHECK-NEXT: mov z2.h, w9 +; CHECK-NEXT: ldrh w8, [x8] +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p0/m, w8 +; CHECK-NEXT: .LBB3_6: // %else8 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB3_7: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB3_4 +; CHECK-NEXT: .LBB3_8: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: tbnz w8, #3, .LBB3_5 +; CHECK-NEXT: b .LBB3_6 + %cval = load <4 x i16>, <4 x i16>* %a + %ptrs = load <4 x i16*>, <4 x i16*>* %b + %mask = icmp eq <4 x i16> %cval, zeroinitializer + %vals = call <4 x i16> @llvm.masked.gather.v4i16(<4 x i16*> %ptrs, i32 8, <4 x i1> %mask, <4 x i16> undef) + store <4 x i16> %vals, <4 x i16>* %a + ret void +} + +define void @masked_gather_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; 
CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z1.b, z0.b[6] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z6.b, z0.b[7] +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: and w8, w11, #0x1 +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: tbz w9, #0, .LBB4_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr h0, [x9] +; CHECK-NEXT: .LBB4_2: // %else +; CHECK-NEXT: index z1.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: tbnz w8, #1, .LBB4_11 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB4_12 +; CHECK-NEXT: .LBB4_4: // %else5 +; CHECK-NEXT: tbnz w8, #3, .LBB4_13 +; CHECK-NEXT: .LBB4_5: // %else8 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbnz w8, #4, .LBB4_14 +; CHECK-NEXT: .LBB4_6: // %else11 +; CHECK-NEXT: tbnz w8, #5, .LBB4_15 +; CHECK-NEXT: .LBB4_7: // %else14 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbnz w8, #6, .LBB4_16 +; CHECK-NEXT: .LBB4_8: // %else17 +; CHECK-NEXT: tbz w8, #7, .LBB4_10 +; CHECK-NEXT: .LBB4_9: // %cond.load19 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #7 +; CHECK-NEXT: fmov x8, d2 +; CHECK-NEXT: mov z2.h, w9 +; CHECK-NEXT: ldrh w8, [x8] +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p0/m, w8 +; CHECK-NEXT: .LBB4_10: // %else20 +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB4_11: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB4_4 +; CHECK-NEXT: .LBB4_12: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: tbz w8, #3, .LBB4_5 +; CHECK-NEXT: .LBB4_13: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbz w8, #4, .LBB4_6 +; CHECK-NEXT: .LBB4_14: // %cond.load10 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #4 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: tbz w8, #5, .LBB4_7 +; CHECK-NEXT: .LBB4_15: // %cond.load13 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #5 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbz w8, #6, .LBB4_8 +; CHECK-NEXT: .LBB4_16: // %cond.load16 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #6 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; 
CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: tbnz w8, #7, .LBB4_9 +; CHECK-NEXT: b .LBB4_10 + %cval = load <8 x i16>, <8 x i16>* %a + %ptrs = load <8 x i16*>, <8 x i16*>* %b + %mask = icmp eq <8 x i16> %cval, zeroinitializer + %vals = call <8 x i16> @llvm.masked.gather.v8i16(<8 x i16*> %ptrs, i32 8, <8 x i1> %mask, <8 x i16> undef) + store <8 x i16> %vals, <8 x i16>* %a + ret void +} + +define void @masked_gather_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 { +; CHECK-LABEL: masked_gather_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q0, q2, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: cmpeq p1.h, p0/z, z0.h, z1.h +; CHECK-NEXT: cmpeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: bfi w8, w9, #1, #1 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z6.b, z0.b[6] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: bfi w8, w10, #2, #1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: bfi w8, w11, #3, #1 +; CHECK-NEXT: fmov w11, s6 +; CHECK-NEXT: mov z7.b, z0.b[7] +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: bfi w8, w9, #4, #1 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: bfi w8, w10, #5, #1 +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w11, lsl #6 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: orr w8, w8, w9, lsl #7 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: orr w8, w8, w10, lsl #8 +; CHECK-NEXT: and w10, w11, #0x1 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #9 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z6.b, z0.b[6] +; CHECK-NEXT: and w10, w11, #0x1 +; CHECK-NEXT: orr w8, w8, w9, lsl #10 +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fmov w11, s6 +; CHECK-NEXT: orr w8, w8, w10, lsl #11 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: mov z0.b, z0.b[7] +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: orr w8, w8, w9, lsl #12 +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #13 +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: orr w8, w8, w9, lsl #14 +; CHECK-NEXT: orr w9, w8, w10, lsl #15 +; CHECK-NEXT: and w8, w9, #0xffff +; CHECK-NEXT: tbz w9, #0, .LBB5_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr h0, [x9] +; CHECK-NEXT: b .LBB5_3 +; CHECK-NEXT: .LBB5_2: +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: .LBB5_3: // %else +; CHECK-NEXT: index z1.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: tbnz w8, #1, .LBB5_12 +; CHECK-NEXT: // %bb.4: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB5_13 +; CHECK-NEXT: .LBB5_5: // %else5 +; CHECK-NEXT: tbnz w8, #3, .LBB5_14 +; CHECK-NEXT: .LBB5_6: // %else8 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbnz w8, #4, .LBB5_15 +; CHECK-NEXT: 
.LBB5_7: // %else11 +; CHECK-NEXT: tbnz w8, #5, .LBB5_16 +; CHECK-NEXT: .LBB5_8: // %else14 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbnz w8, #6, .LBB5_17 +; CHECK-NEXT: .LBB5_9: // %else17 +; CHECK-NEXT: tbnz w8, #7, .LBB5_18 +; CHECK-NEXT: .LBB5_10: // %else20 +; CHECK-NEXT: ldr q3, [x1, #64] +; CHECK-NEXT: tbz w8, #8, .LBB5_19 +; CHECK-NEXT: .LBB5_11: // %cond.load22 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: ptrue p1.h, vl1 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z2.h, p1/m, w9 +; CHECK-NEXT: tbnz w8, #9, .LBB5_20 +; CHECK-NEXT: b .LBB5_21 +; CHECK-NEXT: .LBB5_12: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB5_5 +; CHECK-NEXT: .LBB5_13: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: tbz w8, #3, .LBB5_6 +; CHECK-NEXT: .LBB5_14: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbz w8, #4, .LBB5_7 +; CHECK-NEXT: .LBB5_15: // %cond.load10 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #4 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: tbz w8, #5, .LBB5_8 +; CHECK-NEXT: .LBB5_16: // %cond.load13 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #5 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbz w8, #6, .LBB5_9 +; CHECK-NEXT: .LBB5_17: // %cond.load16 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #6 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: tbz w8, #7, .LBB5_10 +; CHECK-NEXT: .LBB5_18: // %cond.load19 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #7 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: mov z0.h, p1/m, w9 +; CHECK-NEXT: ldr q3, [x1, #64] +; CHECK-NEXT: tbnz w8, #8, .LBB5_11 +; CHECK-NEXT: .LBB5_19: +; CHECK-NEXT: // implicit-def: $q2 +; CHECK-NEXT: tbz w8, #9, .LBB5_21 +; CHECK-NEXT: .LBB5_20: // %cond.load25 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z2.h, p1/m, w9 +; CHECK-NEXT: .LBB5_21: // %else26 +; CHECK-NEXT: ldr q3, [x1, #80] +; CHECK-NEXT: tbnz w8, #10, .LBB5_29 +; CHECK-NEXT: // %bb.22: // %else29 +; CHECK-NEXT: tbnz w8, #11, .LBB5_30 +; CHECK-NEXT: .LBB5_23: // %else32 +; CHECK-NEXT: ldr q3, [x1, #96] +; CHECK-NEXT: tbnz w8, #12, .LBB5_31 +; CHECK-NEXT: .LBB5_24: // %else35 +; CHECK-NEXT: tbnz w8, #13, .LBB5_32 +; CHECK-NEXT: .LBB5_25: // %else38 +; CHECK-NEXT: ldr q3, [x1, #112] +; CHECK-NEXT: tbnz w8, #14, .LBB5_33 +; CHECK-NEXT: .LBB5_26: // 
%else41 +; CHECK-NEXT: tbz w8, #15, .LBB5_28 +; CHECK-NEXT: .LBB5_27: // %cond.load43 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w9, #7 +; CHECK-NEXT: fmov x8, d3 +; CHECK-NEXT: mov z3.h, w9 +; CHECK-NEXT: ldrh w8, [x8] +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z2.h, p0/m, w8 +; CHECK-NEXT: .LBB5_28: // %else44 +; CHECK-NEXT: stp q0, q2, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB5_29: // %cond.load28 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z2.h, p1/m, w9 +; CHECK-NEXT: tbz w8, #11, .LBB5_23 +; CHECK-NEXT: .LBB5_30: // %cond.load31 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z2.h, p1/m, w9 +; CHECK-NEXT: ldr q3, [x1, #96] +; CHECK-NEXT: tbz w8, #12, .LBB5_24 +; CHECK-NEXT: .LBB5_31: // %cond.load34 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #4 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z2.h, p1/m, w9 +; CHECK-NEXT: tbz w8, #13, .LBB5_25 +; CHECK-NEXT: .LBB5_32: // %cond.load37 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #5 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z2.h, p1/m, w9 +; CHECK-NEXT: ldr q3, [x1, #112] +; CHECK-NEXT: tbz w8, #14, .LBB5_26 +; CHECK-NEXT: .LBB5_33: // %cond.load40 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #6 +; CHECK-NEXT: ldrh w9, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z2.h, p1/m, w9 +; CHECK-NEXT: tbnz w8, #15, .LBB5_27 +; CHECK-NEXT: b .LBB5_28 + %cval = load <16 x i16>, <16 x i16>* %a + %ptrs = load <16 x i16*>, <16 x i16*>* %b + %mask = icmp eq <16 x i16> %cval, zeroinitializer + %vals = call <16 x i16> @llvm.masked.gather.v16i16(<16 x i16*> %ptrs, i32 8, <16 x i1> %mask, <16 x i16> undef) + store <16 x i16> %vals, <16 x i16>* %a + ret void +} + +define void @masked_gather_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI6_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI6_0] +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbz w9, #0, .LBB6_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldr s0, [x9] +; CHECK-NEXT: .LBB6_2: // %else +; CHECK-NEXT: tbz w8, #1, .LBB6_4 +; CHECK-NEXT: // %bb.3: // %cond.load1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov w9, #1 +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: index z2.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mov z1.s, w9 +; CHECK-NEXT: ldr w8, [x8] +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p0/m, w8 +; CHECK-NEXT: .LBB6_4: // %else2 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <2 x i32>, <2 x i32>* %a 
+ %ptrs = load <2 x i32*>, <2 x i32*>* %b + %mask = icmp eq <2 x i32> %cval, zeroinitializer + %vals = call <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*> %ptrs, i32 8, <2 x i1> %mask, <2 x i32> undef) + store <2 x i32> %vals, <2 x i32>* %a + ret void +} + +define void @masked_gather_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbz w10, #0, .LBB7_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr s0, [x9] +; CHECK-NEXT: .LBB7_2: // %else +; CHECK-NEXT: index z1.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: tbnz w8, #1, .LBB7_7 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB7_8 +; CHECK-NEXT: .LBB7_4: // %else5 +; CHECK-NEXT: tbz w8, #3, .LBB7_6 +; CHECK-NEXT: .LBB7_5: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #3 +; CHECK-NEXT: fmov x8, d2 +; CHECK-NEXT: mov z2.s, w9 +; CHECK-NEXT: ldr w8, [x8] +; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z2.s +; CHECK-NEXT: mov z0.s, p0/m, w8 +; CHECK-NEXT: .LBB7_6: // %else8 +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB7_7: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.s, w10 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z2.s +; CHECK-NEXT: mov z0.s, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB7_4 +; CHECK-NEXT: .LBB7_8: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: mov z3.s, w10 +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z0.s, p1/m, w9 +; CHECK-NEXT: tbnz w8, #3, .LBB7_5 +; CHECK-NEXT: b .LBB7_6 + %cval = load <4 x i32>, <4 x i32>* %a + %ptrs = load <4 x i32*>, <4 x i32*>* %b + %mask = icmp eq <4 x i32> %cval, zeroinitializer + %vals = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 8, <4 x i1> %mask, <4 x i32> undef) + store <4 x i32> %vals, <4 x i32>* %a + ret void +} + +define void @masked_gather_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI8_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI8_0] +; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z1.h, p1, z1.h, z0.h +; 
CHECK-NEXT: uzp1 z0.b, z1.b, z1.b +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z1.b, z0.b[6] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z6.b, z0.b[7] +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: and w8, w11, #0x1 +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: tbz w9, #0, .LBB8_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr s0, [x9] +; CHECK-NEXT: b .LBB8_3 +; CHECK-NEXT: .LBB8_2: +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: .LBB8_3: // %else +; CHECK-NEXT: index z1.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: tbnz w8, #1, .LBB8_8 +; CHECK-NEXT: // %bb.4: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB8_9 +; CHECK-NEXT: .LBB8_5: // %else5 +; CHECK-NEXT: tbnz w8, #3, .LBB8_10 +; CHECK-NEXT: .LBB8_6: // %else8 +; CHECK-NEXT: ldr q3, [x1, #32] +; CHECK-NEXT: tbz w8, #4, .LBB8_11 +; CHECK-NEXT: .LBB8_7: // %cond.load10 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: ptrue p1.s, vl1 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: mov z2.s, p1/m, w9 +; CHECK-NEXT: tbnz w8, #5, .LBB8_12 +; CHECK-NEXT: b .LBB8_13 +; CHECK-NEXT: .LBB8_8: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.s, w10 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z2.s +; CHECK-NEXT: mov z0.s, p1/m, w9 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB8_5 +; CHECK-NEXT: .LBB8_9: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: mov z3.s, w10 +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z0.s, p1/m, w9 +; CHECK-NEXT: tbz w8, #3, .LBB8_6 +; CHECK-NEXT: .LBB8_10: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.s, w10 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z2.s +; CHECK-NEXT: mov z0.s, p1/m, w9 +; CHECK-NEXT: ldr q3, [x1, #32] +; CHECK-NEXT: tbnz w8, #4, .LBB8_7 +; CHECK-NEXT: .LBB8_11: +; CHECK-NEXT: // implicit-def: $q2 +; CHECK-NEXT: tbz w8, #5, .LBB8_13 +; CHECK-NEXT: .LBB8_12: // %cond.load13 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z3.s, w10 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z2.s, p1/m, w9 +; CHECK-NEXT: .LBB8_13: // %else14 +; CHECK-NEXT: ldr q3, [x1, #48] +; CHECK-NEXT: tbnz w8, #6, .LBB8_17 +; CHECK-NEXT: // %bb.14: // %else17 +; CHECK-NEXT: tbz w8, #7, .LBB8_16 +; CHECK-NEXT: .LBB8_15: // %cond.load19 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w9, #3 +; CHECK-NEXT: fmov x8, d3 +; CHECK-NEXT: mov z3.s, w9 +; CHECK-NEXT: ldr w8, [x8] +; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z2.s, p0/m, w8 +; CHECK-NEXT: .LBB8_16: // %else20 +; CHECK-NEXT: stp q0, q2, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; 
CHECK-NEXT: .LBB8_17: // %cond.load16 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr w9, [x9] +; CHECK-NEXT: mov z4.s, w10 +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z4.s +; CHECK-NEXT: mov z2.s, p1/m, w9 +; CHECK-NEXT: tbnz w8, #7, .LBB8_15 +; CHECK-NEXT: b .LBB8_16 + %cval = load <8 x i32>, <8 x i32>* %a + %ptrs = load <8 x i32*>, <8 x i32*>* %b + %mask = icmp eq <8 x i32> %cval, zeroinitializer + %vals = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %ptrs, i32 8, <8 x i1> %mask, <8 x i32> undef) + store <8 x i32> %vals, <8 x i32>* %a + ret void +} + +define void @masked_gather_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 { +; CHECK-LABEL: masked_gather_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: cbnz x8, .LBB9_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: ldr d0, [x1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: ldr d0, [x8] +; CHECK-NEXT: .LBB9_2: // %else +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %cval = load <1 x i64>, <1 x i64>* %a + %ptrs = load <1 x i64*>, <1 x i64*>* %b + %mask = icmp eq <1 x i64> %cval, zeroinitializer + %vals = call <1 x i64> @llvm.masked.gather.v1i64(<1 x i64*> %ptrs, i32 8, <1 x i1> %mask, <1 x i64> undef) + store <1 x i64> %vals, <1 x i64>* %a + ret void +} + +define void @masked_gather_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI10_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI10_0] +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbz w9, #0, .LBB10_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldr d0, [x9] +; CHECK-NEXT: .LBB10_2: // %else +; CHECK-NEXT: tbz w8, #1, .LBB10_4 +; CHECK-NEXT: // %bb.3: // %cond.load1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov w9, #1 +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: index z2.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov z1.d, x9 +; CHECK-NEXT: ldr x8, [x8] +; CHECK-NEXT: cmpeq p0.d, p0/z, z2.d, z1.d +; CHECK-NEXT: mov z0.d, p0/m, x8 +; CHECK-NEXT: .LBB10_4: // %else2 +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <2 x i64>, <2 x i64>* %a + %ptrs = load <2 x i64*>, <2 x i64*>* %b + %mask = icmp eq <2 x i64> %cval, zeroinitializer + %vals = call <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %ptrs, i32 8, <2 x i1> %mask, <2 x i64> undef) + store <2 x i64> %vals, <2 x i64>* %a + ret void +} + +define void @masked_gather_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI11_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI11_0] +; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z1.d +; CHECK-NEXT: cmpeq p0.d, p0/z, z2.d, z1.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: 
uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: splice z1.s, p0, z1.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z1.h, z1.h +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbz w10, #0, .LBB11_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr d0, [x9] +; CHECK-NEXT: b .LBB11_3 +; CHECK-NEXT: .LBB11_2: +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: .LBB11_3: // %else +; CHECK-NEXT: index z1.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: tbz w8, #1, .LBB11_5 +; CHECK-NEXT: // %bb.4: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.d, x10 +; CHECK-NEXT: ldr x9, [x9] +; CHECK-NEXT: cmpeq p1.d, p0/z, z1.d, z2.d +; CHECK-NEXT: mov z0.d, p1/m, x9 +; CHECK-NEXT: .LBB11_5: // %else2 +; CHECK-NEXT: ldr q3, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB11_7 +; CHECK-NEXT: // %bb.6: // %cond.load4 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: ptrue p1.d, vl1 +; CHECK-NEXT: ldr x9, [x9] +; CHECK-NEXT: mov z2.d, p1/m, x9 +; CHECK-NEXT: tbnz w8, #3, .LBB11_8 +; CHECK-NEXT: b .LBB11_9 +; CHECK-NEXT: .LBB11_7: +; CHECK-NEXT: // implicit-def: $q2 +; CHECK-NEXT: tbz w8, #3, .LBB11_9 +; CHECK-NEXT: .LBB11_8: // %cond.load7 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w9, #1 +; CHECK-NEXT: fmov x8, d3 +; CHECK-NEXT: mov z3.d, x9 +; CHECK-NEXT: ldr x8, [x8] +; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z2.d, p0/m, x8 +; CHECK-NEXT: .LBB11_9: // %else8 +; CHECK-NEXT: stp q0, q2, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <4 x i64>, <4 x i64>* %a + %ptrs = load <4 x i64*>, <4 x i64*>* %b + %mask = icmp eq <4 x i64> %cval, zeroinitializer + %vals = call <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*> %ptrs, i32 8, <4 x i1> %mask, <4 x i64> undef) + store <4 x i64> %vals, <4 x i64>* %a + ret void +} + +define void @masked_gather_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI12_0 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI12_0] +; CHECK-NEXT: fcmeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbz w9, #0, .LBB12_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldr h0, [x9] +; CHECK-NEXT: .LBB12_2: // %else +; CHECK-NEXT: tbz w8, #1, .LBB12_4 +; CHECK-NEXT: // %bb.3: // %cond.load1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: ldr h1, [x8] +; CHECK-NEXT: str h0, [sp] +; CHECK-NEXT: str h1, [sp, #2] +; CHECK-NEXT: ldr d0, [sp] +; CHECK-NEXT: .LBB12_4: // %else2 +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: str w8, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <2 x half>, <2 x half>* %a + %ptrs = load <2 x half*>, <2 x half*>* %b + 
%mask = fcmp oeq <2 x half> %cval, zeroinitializer + %vals = call <2 x half> @llvm.masked.gather.v2f16(<2 x half*> %ptrs, i32 8, <2 x i1> %mask, <2 x half> undef) + store <2 x half> %vals, <2 x half>* %a + ret void +} + +define void @masked_gather_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI13_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI13_0] +; CHECK-NEXT: fcmeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbz w10, #0, .LBB13_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr h0, [x9] +; CHECK-NEXT: .LBB13_2: // %else +; CHECK-NEXT: index z1.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: tbnz w8, #1, .LBB13_7 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB13_8 +; CHECK-NEXT: .LBB13_4: // %else5 +; CHECK-NEXT: tbz w8, #3, .LBB13_6 +; CHECK-NEXT: .LBB13_5: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #3 +; CHECK-NEXT: fmov x8, d2 +; CHECK-NEXT: mov z3.h, w9 +; CHECK-NEXT: ldr h2, [x8] +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p0/m, h2 +; CHECK-NEXT: .LBB13_6: // %else8 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB13_7: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB13_4 +; CHECK-NEXT: .LBB13_8: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z0.h, p1/m, h3 +; CHECK-NEXT: tbnz w8, #3, .LBB13_5 +; CHECK-NEXT: b .LBB13_6 + %cval = load <4 x half>, <4 x half>* %a + %ptrs = load <4 x half*>, <4 x half*>* %b + %mask = fcmp oeq <4 x half> %cval, zeroinitializer + %vals = call <4 x half> @llvm.masked.gather.v4f16(<4 x half*> %ptrs, i32 8, <4 x i1> %mask, <4 x half> undef) + store <4 x half> %vals, <4 x half>* %a + ret void +} + +define void @masked_gather_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: fcmeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z4.b, 
z0.b[4] +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z1.b, z0.b[6] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z6.b, z0.b[7] +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: and w8, w11, #0x1 +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: tbz w9, #0, .LBB14_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr h0, [x9] +; CHECK-NEXT: .LBB14_2: // %else +; CHECK-NEXT: index z1.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: tbnz w8, #1, .LBB14_11 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB14_12 +; CHECK-NEXT: .LBB14_4: // %else5 +; CHECK-NEXT: tbnz w8, #3, .LBB14_13 +; CHECK-NEXT: .LBB14_5: // %else8 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbnz w8, #4, .LBB14_14 +; CHECK-NEXT: .LBB14_6: // %else11 +; CHECK-NEXT: tbnz w8, #5, .LBB14_15 +; CHECK-NEXT: .LBB14_7: // %else14 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbnz w8, #6, .LBB14_16 +; CHECK-NEXT: .LBB14_8: // %else17 +; CHECK-NEXT: tbz w8, #7, .LBB14_10 +; CHECK-NEXT: .LBB14_9: // %cond.load19 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #7 +; CHECK-NEXT: fmov x8, d2 +; CHECK-NEXT: mov z3.h, w9 +; CHECK-NEXT: ldr h2, [x8] +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p0/m, h2 +; CHECK-NEXT: .LBB14_10: // %else20 +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB14_11: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB14_4 +; CHECK-NEXT: .LBB14_12: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z0.h, p1/m, h3 +; CHECK-NEXT: tbz w8, #3, .LBB14_5 +; CHECK-NEXT: .LBB14_13: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbz w8, #4, .LBB14_6 +; CHECK-NEXT: .LBB14_14: // %cond.load10 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #4 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z0.h, p1/m, h3 +; CHECK-NEXT: tbz w8, #5, .LBB14_7 +; CHECK-NEXT: .LBB14_15: // %cond.load13 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #5 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbz w8, #6, .LBB14_8 +; CHECK-NEXT: .LBB14_16: // %cond.load16 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #6 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z0.h, p1/m, h3 +; CHECK-NEXT: 
tbnz w8, #7, .LBB14_9 +; CHECK-NEXT: b .LBB14_10 + %cval = load <8 x half>, <8 x half>* %a + %ptrs = load <8 x half*>, <8 x half*>* %b + %mask = fcmp oeq <8 x half> %cval, zeroinitializer + %vals = call <8 x half> @llvm.masked.gather.v8f16(<8 x half*> %ptrs, i32 8, <8 x i1> %mask, <8 x half> undef) + store <8 x half> %vals, <8 x half>* %a + ret void +} + +define void @masked_gather_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 { +; CHECK-LABEL: masked_gather_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI15_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q0, q2, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_0] +; CHECK-NEXT: fcmeq p1.h, p0/z, z0.h, z1.h +; CHECK-NEXT: fcmeq p0.h, p0/z, z2.h, z1.h +; CHECK-NEXT: mov z0.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: bfi w8, w9, #1, #1 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z6.b, z0.b[6] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: bfi w8, w10, #2, #1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: bfi w8, w11, #3, #1 +; CHECK-NEXT: fmov w11, s6 +; CHECK-NEXT: mov z7.b, z0.b[7] +; CHECK-NEXT: mov z0.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: bfi w8, w9, #4, #1 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: bfi w8, w10, #5, #1 +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w11, lsl #6 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: orr w8, w8, w9, lsl #7 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: orr w8, w8, w10, lsl #8 +; CHECK-NEXT: and w10, w11, #0x1 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #9 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z6.b, z0.b[6] +; CHECK-NEXT: and w10, w11, #0x1 +; CHECK-NEXT: orr w8, w8, w9, lsl #10 +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fmov w11, s6 +; CHECK-NEXT: orr w8, w8, w10, lsl #11 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: mov z0.b, z0.b[7] +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: orr w8, w8, w9, lsl #12 +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #13 +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: orr w8, w8, w9, lsl #14 +; CHECK-NEXT: orr w9, w8, w10, lsl #15 +; CHECK-NEXT: and w8, w9, #0xffff +; CHECK-NEXT: tbz w9, #0, .LBB15_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr h0, [x9] +; CHECK-NEXT: b .LBB15_3 +; CHECK-NEXT: .LBB15_2: +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: .LBB15_3: // %else +; CHECK-NEXT: index z1.h, #0, #1 +; CHECK-NEXT: ptrue p0.h +; CHECK-NEXT: tbnz w8, #1, .LBB15_12 +; CHECK-NEXT: // %bb.4: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB15_13 +; CHECK-NEXT: .LBB15_5: // %else5 +; CHECK-NEXT: tbnz w8, #3, .LBB15_14 +; CHECK-NEXT: .LBB15_6: // %else8 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbnz w8, #4, .LBB15_15 +; CHECK-NEXT: .LBB15_7: // %else11 +; CHECK-NEXT: tbnz w8, #5, .LBB15_16 +; 
CHECK-NEXT: .LBB15_8: // %else14 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbnz w8, #6, .LBB15_17 +; CHECK-NEXT: .LBB15_9: // %else17 +; CHECK-NEXT: tbnz w8, #7, .LBB15_18 +; CHECK-NEXT: .LBB15_10: // %else20 +; CHECK-NEXT: ldr q3, [x1, #64] +; CHECK-NEXT: tbz w8, #8, .LBB15_19 +; CHECK-NEXT: .LBB15_11: // %cond.load22 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: ptrue p1.h, vl1 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: sel z2.h, p1, z2.h, z0.h +; CHECK-NEXT: tbnz w8, #9, .LBB15_20 +; CHECK-NEXT: b .LBB15_21 +; CHECK-NEXT: .LBB15_12: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB15_5 +; CHECK-NEXT: .LBB15_13: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z0.h, p1/m, h3 +; CHECK-NEXT: tbz w8, #3, .LBB15_6 +; CHECK-NEXT: .LBB15_14: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q2, [x1, #32] +; CHECK-NEXT: tbz w8, #4, .LBB15_7 +; CHECK-NEXT: .LBB15_15: // %cond.load10 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #4 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z0.h, p1/m, h3 +; CHECK-NEXT: tbz w8, #5, .LBB15_8 +; CHECK-NEXT: .LBB15_16: // %cond.load13 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #5 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q2, [x1, #48] +; CHECK-NEXT: tbz w8, #6, .LBB15_9 +; CHECK-NEXT: .LBB15_17: // %cond.load16 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #6 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z0.h, p1/m, h3 +; CHECK-NEXT: tbz w8, #7, .LBB15_10 +; CHECK-NEXT: .LBB15_18: // %cond.load19 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #7 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, w10 +; CHECK-NEXT: ldr h2, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z3.h +; CHECK-NEXT: mov z0.h, p1/m, h2 +; CHECK-NEXT: ldr q3, [x1, #64] +; CHECK-NEXT: tbnz w8, #8, .LBB15_11 +; CHECK-NEXT: .LBB15_19: +; CHECK-NEXT: // implicit-def: $q2 +; CHECK-NEXT: tbz w8, #9, .LBB15_21 +; CHECK-NEXT: .LBB15_20: // %cond.load25 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z2.h, p1/m, h3 +; CHECK-NEXT: .LBB15_21: // %else26 +; CHECK-NEXT: ldr q3, [x1, #80] +; CHECK-NEXT: tbnz w8, #10, .LBB15_29 +; CHECK-NEXT: // %bb.22: // %else29 +; CHECK-NEXT: tbnz w8, #11, .LBB15_30 +; CHECK-NEXT: .LBB15_23: // %else32 +; CHECK-NEXT: ldr q3, [x1, #96] +; CHECK-NEXT: tbnz w8, #12, .LBB15_31 +; CHECK-NEXT: .LBB15_24: // %else35 +; CHECK-NEXT: tbnz w8, #13, .LBB15_32 +; CHECK-NEXT: .LBB15_25: // %else38 +; CHECK-NEXT: ldr q3, [x1, #112] +; CHECK-NEXT: tbnz w8, #14, .LBB15_33 +; CHECK-NEXT: .LBB15_26: // %else41 +; CHECK-NEXT: tbz w8, 
#15, .LBB15_28 +; CHECK-NEXT: .LBB15_27: // %cond.load43 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w9, #7 +; CHECK-NEXT: fmov x8, d3 +; CHECK-NEXT: mov z4.h, w9 +; CHECK-NEXT: ldr h3, [x8] +; CHECK-NEXT: cmpeq p0.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z2.h, p0/m, h3 +; CHECK-NEXT: .LBB15_28: // %else44 +; CHECK-NEXT: stp q0, q2, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB15_29: // %cond.load28 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr h4, [x9] +; CHECK-NEXT: mov z5.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z5.h +; CHECK-NEXT: mov z2.h, p1/m, h4 +; CHECK-NEXT: tbz w8, #11, .LBB15_23 +; CHECK-NEXT: .LBB15_30: // %cond.load31 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z2.h, p1/m, h3 +; CHECK-NEXT: ldr q3, [x1, #96] +; CHECK-NEXT: tbz w8, #12, .LBB15_24 +; CHECK-NEXT: .LBB15_31: // %cond.load34 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #4 +; CHECK-NEXT: ldr h4, [x9] +; CHECK-NEXT: mov z5.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z5.h +; CHECK-NEXT: mov z2.h, p1/m, h4 +; CHECK-NEXT: tbz w8, #13, .LBB15_25 +; CHECK-NEXT: .LBB15_32: // %cond.load37 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #5 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z4.h, w10 +; CHECK-NEXT: ldr h3, [x9] +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z4.h +; CHECK-NEXT: mov z2.h, p1/m, h3 +; CHECK-NEXT: ldr q3, [x1, #112] +; CHECK-NEXT: tbz w8, #14, .LBB15_26 +; CHECK-NEXT: .LBB15_33: // %cond.load40 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #6 +; CHECK-NEXT: ldr h4, [x9] +; CHECK-NEXT: mov z5.h, w10 +; CHECK-NEXT: cmpeq p1.h, p0/z, z1.h, z5.h +; CHECK-NEXT: mov z2.h, p1/m, h4 +; CHECK-NEXT: tbnz w8, #15, .LBB15_27 +; CHECK-NEXT: b .LBB15_28 + %cval = load <16 x half>, <16 x half>* %a + %ptrs = load <16 x half*>, <16 x half*>* %b + %mask = fcmp oeq <16 x half> %cval, zeroinitializer + %vals = call <16 x half> @llvm.masked.gather.v16f16(<16 x half*> %ptrs, i32 8, <16 x i1> %mask, <16 x half> undef) + store <16 x half> %vals, <16 x half>* %a + ret void +} + + +define void @masked_gather_v2f32(<2 x float>* %a, <2 x float*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI16_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI16_0] +; CHECK-NEXT: fcmeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbz w9, #0, .LBB16_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldr s0, [x9] +; CHECK-NEXT: .LBB16_2: // %else +; CHECK-NEXT: tbz w8, #1, .LBB16_4 +; CHECK-NEXT: // %bb.3: // %cond.load1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov w9, #1 +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: index z3.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: mov z2.s, w9 +; CHECK-NEXT: ldr s1, [x8] +; CHECK-NEXT: cmpeq p0.s, p0/z, z3.s, z2.s +; CHECK-NEXT: mov z0.s, p0/m, s1 +; CHECK-NEXT: .LBB16_4: // %else2 +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <2 x float>, <2 x 
float>* %a + %ptrs = load <2 x float*>, <2 x float*>* %b + %mask = fcmp oeq <2 x float> %cval, zeroinitializer + %vals = call <2 x float> @llvm.masked.gather.v2f32(<2 x float*> %ptrs, i32 8, <2 x i1> %mask, <2 x float> undef) + store <2 x float> %vals, <2 x float>* %a + ret void +} + +define void @masked_gather_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI17_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: fcmeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z0.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbz w10, #0, .LBB17_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr s0, [x9] +; CHECK-NEXT: .LBB17_2: // %else +; CHECK-NEXT: index z1.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: tbnz w8, #1, .LBB17_7 +; CHECK-NEXT: // %bb.3: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB17_8 +; CHECK-NEXT: .LBB17_4: // %else5 +; CHECK-NEXT: tbz w8, #3, .LBB17_6 +; CHECK-NEXT: .LBB17_5: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w9, #3 +; CHECK-NEXT: fmov x8, d2 +; CHECK-NEXT: mov z3.s, w9 +; CHECK-NEXT: ldr s2, [x8] +; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z0.s, p0/m, s2 +; CHECK-NEXT: .LBB17_6: // %else8 +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB17_7: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.s, w10 +; CHECK-NEXT: ldr s2, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z0.s, p1/m, s2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB17_4 +; CHECK-NEXT: .LBB17_8: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr s3, [x9] +; CHECK-NEXT: mov z4.s, w10 +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z4.s +; CHECK-NEXT: mov z0.s, p1/m, s3 +; CHECK-NEXT: tbnz w8, #3, .LBB17_5 +; CHECK-NEXT: b .LBB17_6 + %cval = load <4 x float>, <4 x float>* %a + %ptrs = load <4 x float*>, <4 x float*>* %b + %mask = fcmp oeq <4 x float> %cval, zeroinitializer + %vals = call <4 x float> @llvm.masked.gather.v4f32(<4 x float*> %ptrs, i32 8, <4 x i1> %mask, <4 x float> undef) + store <4 x float> %vals, <4 x float>* %a + ret void +} + +define void @masked_gather_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 { +; CHECK-LABEL: masked_gather_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI18_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI18_0] +; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z1.s +; CHECK-NEXT: fcmeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z0.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h +; 
CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z1.h, p1, z1.h, z0.h +; CHECK-NEXT: uzp1 z0.b, z1.b, z1.b +; CHECK-NEXT: mov z1.b, z0.b[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: mov z2.b, z0.b[2] +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z4.b, z0.b[4] +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: mov z5.b, z0.b[5] +; CHECK-NEXT: mov z1.b, z0.b[6] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z6.b, z0.b[7] +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: and w8, w11, #0x1 +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: tbz w9, #0, .LBB18_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr s0, [x9] +; CHECK-NEXT: b .LBB18_3 +; CHECK-NEXT: .LBB18_2: +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: .LBB18_3: // %else +; CHECK-NEXT: index z1.s, #0, #1 +; CHECK-NEXT: ptrue p0.s +; CHECK-NEXT: tbnz w8, #1, .LBB18_8 +; CHECK-NEXT: // %bb.4: // %else2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbnz w8, #2, .LBB18_9 +; CHECK-NEXT: .LBB18_5: // %else5 +; CHECK-NEXT: tbnz w8, #3, .LBB18_10 +; CHECK-NEXT: .LBB18_6: // %else8 +; CHECK-NEXT: ldr q3, [x1, #32] +; CHECK-NEXT: tbz w8, #4, .LBB18_11 +; CHECK-NEXT: .LBB18_7: // %cond.load10 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: ptrue p1.s, vl1 +; CHECK-NEXT: ldr s2, [x9] +; CHECK-NEXT: sel z2.s, p1, z2.s, z0.s +; CHECK-NEXT: tbnz w8, #5, .LBB18_12 +; CHECK-NEXT: b .LBB18_13 +; CHECK-NEXT: .LBB18_8: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.s, w10 +; CHECK-NEXT: ldr s2, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z0.s, p1/m, s2 +; CHECK-NEXT: ldr q2, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB18_5 +; CHECK-NEXT: .LBB18_9: // %cond.load4 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr s3, [x9] +; CHECK-NEXT: mov z4.s, w10 +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z4.s +; CHECK-NEXT: mov z0.s, p1/m, s3 +; CHECK-NEXT: tbz w8, #3, .LBB18_6 +; CHECK-NEXT: .LBB18_10: // %cond.load7 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.s, w10 +; CHECK-NEXT: ldr s2, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z3.s +; CHECK-NEXT: mov z0.s, p1/m, s2 +; CHECK-NEXT: ldr q3, [x1, #32] +; CHECK-NEXT: tbnz w8, #4, .LBB18_7 +; CHECK-NEXT: .LBB18_11: +; CHECK-NEXT: // implicit-def: $q2 +; CHECK-NEXT: tbz w8, #5, .LBB18_13 +; CHECK-NEXT: .LBB18_12: // %cond.load13 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z4.s, w10 +; CHECK-NEXT: ldr s3, [x9] +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z4.s +; CHECK-NEXT: mov z2.s, p1/m, s3 +; CHECK-NEXT: .LBB18_13: // %else14 +; CHECK-NEXT: ldr q3, [x1, #48] +; CHECK-NEXT: tbnz w8, #6, .LBB18_17 +; CHECK-NEXT: // %bb.14: // %else17 +; CHECK-NEXT: tbz w8, #7, .LBB18_16 +; CHECK-NEXT: .LBB18_15: // %cond.load19 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w9, #3 +; CHECK-NEXT: fmov x8, d3 +; CHECK-NEXT: mov z4.s, w9 +; CHECK-NEXT: ldr s3, [x8] +; CHECK-NEXT: cmpeq p0.s, p0/z, z1.s, z4.s +; CHECK-NEXT: mov z2.s, p0/m, s3 +; 
CHECK-NEXT: .LBB18_16: // %else20 +; CHECK-NEXT: stp q0, q2, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB18_17: // %cond.load16 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov w10, #2 +; CHECK-NEXT: ldr s4, [x9] +; CHECK-NEXT: mov z5.s, w10 +; CHECK-NEXT: cmpeq p1.s, p0/z, z1.s, z5.s +; CHECK-NEXT: mov z2.s, p1/m, s4 +; CHECK-NEXT: tbnz w8, #7, .LBB18_15 +; CHECK-NEXT: b .LBB18_16 + %cval = load <8 x float>, <8 x float>* %a + %ptrs = load <8 x float*>, <8 x float*>* %b + %mask = fcmp oeq <8 x float> %cval, zeroinitializer + %vals = call <8 x float> @llvm.masked.gather.v8f32(<8 x float*> %ptrs, i32 8, <8 x i1> %mask, <8 x float> undef) + store <8 x float> %vals, <8 x float>* %a + ret void +} + +; Scalarize 1 x double gathers +define void @masked_gather_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 { +; CHECK-LABEL: masked_gather_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: fcmp d0, #0.0 +; CHECK-NEXT: // implicit-def: $d0 +; CHECK-NEXT: b.ne .LBB19_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: ldr d0, [x1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: ldr d0, [x8] +; CHECK-NEXT: .LBB19_2: // %else +; CHECK-NEXT: str d0, [x0] +; CHECK-NEXT: ret + %cval = load <1 x double>, <1 x double>* %a + %ptrs = load <1 x double*>, <1 x double*>* %b + %mask = fcmp oeq <1 x double> %cval, zeroinitializer + %vals = call <1 x double> @llvm.masked.gather.v1f64(<1 x double*> %ptrs, i32 8, <1 x i1> %mask, <1 x double> undef) + store <1 x double> %vals, <1 x double>* %a + ret void +} + +define void @masked_gather_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 { +; CHECK-LABEL: masked_gather_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI20_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI20_0] +; CHECK-NEXT: fcmeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbz w9, #0, .LBB20_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: ldr d0, [x9] +; CHECK-NEXT: .LBB20_2: // %else +; CHECK-NEXT: tbz w8, #1, .LBB20_4 +; CHECK-NEXT: // %bb.3: // %cond.load1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov w9, #1 +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: index z3.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: mov z2.d, x9 +; CHECK-NEXT: ldr d1, [x8] +; CHECK-NEXT: cmpeq p0.d, p0/z, z3.d, z2.d +; CHECK-NEXT: mov z0.d, p0/m, d1 +; CHECK-NEXT: .LBB20_4: // %else2 +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <2 x double>, <2 x double>* %a + %ptrs = load <2 x double*>, <2 x double*>* %b + %mask = fcmp oeq <2 x double> %cval, zeroinitializer + %vals = call <2 x double> @llvm.masked.gather.v2f64(<2 x double*> %ptrs, i32 8, <2 x i1> %mask, <2 x double> undef) + store <2 x double> %vals, <2 x double>* %a + ret void +} + +define void @masked_gather_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 { +; CHECK-LABEL: masked_gather_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI21_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q2, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI21_0] +; 
CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z1.d +; CHECK-NEXT: fcmeq p0.d, p0/z, z2.d, z1.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr q2, [x1] +; CHECK-NEXT: splice z1.s, p0, z1.s, z0.s +; CHECK-NEXT: uzp1 z0.h, z1.h, z1.h +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: fmov w10, s0 +; CHECK-NEXT: fmov w8, s1 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbz w10, #0, .LBB21_2 +; CHECK-NEXT: // %bb.1: // %cond.load +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr d0, [x9] +; CHECK-NEXT: b .LBB21_3 +; CHECK-NEXT: .LBB21_2: +; CHECK-NEXT: // implicit-def: $q0 +; CHECK-NEXT: .LBB21_3: // %else +; CHECK-NEXT: index z1.d, #0, #1 +; CHECK-NEXT: ptrue p0.d +; CHECK-NEXT: tbz w8, #1, .LBB21_5 +; CHECK-NEXT: // %bb.4: // %cond.load1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: mov w10, #1 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.d, x10 +; CHECK-NEXT: ldr d2, [x9] +; CHECK-NEXT: cmpeq p1.d, p0/z, z1.d, z3.d +; CHECK-NEXT: mov z0.d, p1/m, d2 +; CHECK-NEXT: .LBB21_5: // %else2 +; CHECK-NEXT: ldr q3, [x1, #16] +; CHECK-NEXT: tbz w8, #2, .LBB21_7 +; CHECK-NEXT: // %bb.6: // %cond.load4 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: ptrue p1.d, vl1 +; CHECK-NEXT: ldr d2, [x9] +; CHECK-NEXT: sel z2.d, p1, z2.d, z0.d +; CHECK-NEXT: tbnz w8, #3, .LBB21_8 +; CHECK-NEXT: b .LBB21_9 +; CHECK-NEXT: .LBB21_7: +; CHECK-NEXT: // implicit-def: $q2 +; CHECK-NEXT: tbz w8, #3, .LBB21_9 +; CHECK-NEXT: .LBB21_8: // %cond.load7 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov w9, #1 +; CHECK-NEXT: fmov x8, d3 +; CHECK-NEXT: mov z4.d, x9 +; CHECK-NEXT: ldr d3, [x8] +; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z4.d +; CHECK-NEXT: mov z2.d, p0/m, d3 +; CHECK-NEXT: .LBB21_9: // %else8 +; CHECK-NEXT: stp q0, q2, [x0] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %cval = load <4 x double>, <4 x double>* %a + %ptrs = load <4 x double*>, <4 x double*>* %b + %mask = fcmp oeq <4 x double> %cval, zeroinitializer + %vals = call <4 x double> @llvm.masked.gather.v4f64(<4 x double*> %ptrs, i32 8, <4 x i1> %mask, <4 x double> undef) + store <4 x double> %vals, <4 x double>* %a + ret void +} + +; The above tests test the types, the below tests check that the addressing +; modes still function + +declare <4 x i8> @llvm.masked.gather.v4i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>) +declare <8 x i8> @llvm.masked.gather.v8i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>) +declare <16 x i8> @llvm.masked.gather.v16i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>) +declare <32 x i8> @llvm.masked.gather.v32i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>) + +declare <2 x i16> @llvm.masked.gather.v2i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>) +declare <4 x i16> @llvm.masked.gather.v4i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>) +declare <8 x i16> @llvm.masked.gather.v8i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>) +declare <16 x i16> @llvm.masked.gather.v16i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>) + +declare <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>) +declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>) +declare <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>) + +declare <1 x i64> 
@llvm.masked.gather.v1i64(<1 x i64*>, i32, <1 x i1>, <1 x i64>) +declare <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*>, i32, <2 x i1>, <2 x i64>) +declare <4 x i64> @llvm.masked.gather.v4i64(<4 x i64*>, i32, <4 x i1>, <4 x i64>) + +declare <2 x half> @llvm.masked.gather.v2f16(<2 x half*>, i32, <2 x i1>, <2 x half>) +declare <4 x half> @llvm.masked.gather.v4f16(<4 x half*>, i32, <4 x i1>, <4 x half>) +declare <8 x half> @llvm.masked.gather.v8f16(<8 x half*>, i32, <8 x i1>, <8 x half>) +declare <16 x half> @llvm.masked.gather.v16f16(<16 x half*>, i32, <16 x i1>, <16 x half>) + +declare <2 x float> @llvm.masked.gather.v2f32(<2 x float*>, i32, <2 x i1>, <2 x float>) +declare <4 x float> @llvm.masked.gather.v4f32(<4 x float*>, i32, <4 x i1>, <4 x float>) +declare <8 x float> @llvm.masked.gather.v8f32(<8 x float*>, i32, <8 x i1>, <8 x float>) + +declare <1 x double> @llvm.masked.gather.v1f64(<1 x double*>, i32, <1 x i1>, <1 x double>) +declare <2 x double> @llvm.masked.gather.v2f64(<2 x double*>, i32, <2 x i1>, <2 x double>) +declare <4 x double> @llvm.masked.gather.v4f64(<4 x double*>, i32, <4 x i1>, <4 x double>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-scatter.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-scatter.ll @@ -0,0 +1,3099 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +define void @masked_scatter_v4i8(<4 x i8>* %a, <4 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI0_0 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI0_0] +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.h, z1.h[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z1.h[3] +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: ldp q2, q1, [x1] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbnz w10, #0, .LBB0_5 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB0_6 +; CHECK-NEXT: .LBB0_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB0_7 +; CHECK-NEXT: .LBB0_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB0_8 +; CHECK-NEXT: .LBB0_4: // %else6 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB0_5: // %cond.store +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB0_2 +; CHECK-NEXT: .LBB0_6: // %cond.store1 +; CHECK-NEXT: mov z3.h, z0.h[1] +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB0_3 +; CHECK-NEXT: .LBB0_7: // %cond.store3 +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB0_4 +; CHECK-NEXT: .LBB0_8: // %cond.store5 +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov 
w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <4 x i8>, <4 x i8>* %a + %ptrs = load <4 x i8*>, <4 x i8*>* %b + %mask = icmp eq <4 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i8(<4 x i8> %vals, <4 x i8*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8i8(<8 x i8>* %a, <8 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI1_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI1_0] +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.b, z1.b[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z3.b, z1.b[2] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z4.b, z1.b[3] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: mov z5.b, z1.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: mov z6.b, z1.b[5] +; CHECK-NEXT: mov z2.b, z1.b[6] +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z1.b, z1.b[7] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: ldp q2, q1, [x1, #32] +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: ldp q4, q3, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB1_9 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB1_10 +; CHECK-NEXT: .LBB1_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB1_11 +; CHECK-NEXT: .LBB1_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB1_12 +; CHECK-NEXT: .LBB1_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB1_13 +; CHECK-NEXT: .LBB1_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB1_14 +; CHECK-NEXT: .LBB1_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB1_15 +; CHECK-NEXT: .LBB1_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB1_16 +; CHECK-NEXT: .LBB1_8: // %else14 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB1_9: // %cond.store +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB1_2 +; CHECK-NEXT: .LBB1_10: // %cond.store1 +; CHECK-NEXT: mov z5.b, z0.b[1] +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB1_3 +; CHECK-NEXT: .LBB1_11: // %cond.store3 +; CHECK-NEXT: mov z4.b, z0.b[2] +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB1_4 +; CHECK-NEXT: .LBB1_12: // %cond.store5 +; CHECK-NEXT: mov z4.b, z0.b[3] +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #4, .LBB1_5 +; CHECK-NEXT: .LBB1_13: // %cond.store7 +; CHECK-NEXT: mov z3.b, z0.b[4] +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #5, .LBB1_6 +; CHECK-NEXT: .LBB1_14: // %cond.store9 +; CHECK-NEXT: mov z3.b, z0.b[5] +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #6, .LBB1_7 +; CHECK-NEXT: .LBB1_15: // %cond.store11 +; CHECK-NEXT: 
mov z2.b, z0.b[6] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #7, .LBB1_8 +; CHECK-NEXT: .LBB1_16: // %cond.store13 +; CHECK-NEXT: mov z0.b, z0.b[7] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <8 x i8>, <8 x i8>* %a + %ptrs = load <8 x i8*>, <8 x i8*>* %b + %mask = icmp eq <8 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v8i8(<8 x i8> %vals, <8 x i8*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16i8(<16 x i8>* %a, <16 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI2_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI2_0] +; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.b, z1.b[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: mov z3.b, z1.b[2] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z4.b, z1.b[3] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: mov z5.b, z1.b[4] +; CHECK-NEXT: mov z7.b, z1.b[6] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: mov z6.b, z1.b[5] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: bfi w10, w11, #3, #1 +; CHECK-NEXT: fmov w11, s7 +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: mov z16.b, z1.b[7] +; CHECK-NEXT: bfi w10, w8, #4, #1 +; CHECK-NEXT: mov z17.b, z1.b[8] +; CHECK-NEXT: and w8, w11, #0x1 +; CHECK-NEXT: mov z18.b, z1.b[9] +; CHECK-NEXT: bfi w10, w9, #5, #1 +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: orr w8, w10, w8, lsl #6 +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: fmov w11, s18 +; CHECK-NEXT: mov z19.b, z1.b[10] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z2.b, z1.b[11] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: mov z3.b, z1.b[12] +; CHECK-NEXT: orr w8, w8, w9, lsl #7 +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #8 +; CHECK-NEXT: fmov w10, s19 +; CHECK-NEXT: orr w8, w8, w9, lsl #9 +; CHECK-NEXT: mov z20.b, z1.b[13] +; CHECK-NEXT: mov z21.b, z1.b[14] +; CHECK-NEXT: mov z5.b, z1.b[15] +; CHECK-NEXT: and w9, w10, #0x1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: ldp q2, q1, [x1, #96] +; CHECK-NEXT: orr w8, w8, w9, lsl #10 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #11 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w10, s20 +; CHECK-NEXT: orr w8, w8, w9, lsl #12 +; CHECK-NEXT: fmov w9, s21 +; CHECK-NEXT: ldp q4, q3, [x1, #64] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #13 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: orr w8, w8, w9, lsl #14 +; CHECK-NEXT: ldp q6, q5, [x1, #32] +; CHECK-NEXT: orr w9, w8, w10, lsl #15 +; CHECK-NEXT: and w8, w9, #0xffff +; CHECK-NEXT: ldp q16, q7, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB2_17 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB2_18 +; CHECK-NEXT: .LBB2_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB2_19 +; CHECK-NEXT: .LBB2_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB2_20 +; CHECK-NEXT: .LBB2_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB2_21 +; CHECK-NEXT: .LBB2_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB2_22 +; CHECK-NEXT: .LBB2_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB2_23 +; 
CHECK-NEXT: .LBB2_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB2_24 +; CHECK-NEXT: .LBB2_8: // %else14 +; CHECK-NEXT: tbnz w8, #8, .LBB2_25 +; CHECK-NEXT: .LBB2_9: // %else16 +; CHECK-NEXT: tbnz w8, #9, .LBB2_26 +; CHECK-NEXT: .LBB2_10: // %else18 +; CHECK-NEXT: tbnz w8, #10, .LBB2_27 +; CHECK-NEXT: .LBB2_11: // %else20 +; CHECK-NEXT: tbnz w8, #11, .LBB2_28 +; CHECK-NEXT: .LBB2_12: // %else22 +; CHECK-NEXT: tbnz w8, #12, .LBB2_29 +; CHECK-NEXT: .LBB2_13: // %else24 +; CHECK-NEXT: tbnz w8, #13, .LBB2_30 +; CHECK-NEXT: .LBB2_14: // %else26 +; CHECK-NEXT: tbnz w8, #14, .LBB2_31 +; CHECK-NEXT: .LBB2_15: // %else28 +; CHECK-NEXT: tbnz w8, #15, .LBB2_32 +; CHECK-NEXT: .LBB2_16: // %else30 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB2_17: // %cond.store +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d16 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB2_2 +; CHECK-NEXT: .LBB2_18: // %cond.store1 +; CHECK-NEXT: mov z17.b, z0.b[1] +; CHECK-NEXT: mov z16.d, z16.d[1] +; CHECK-NEXT: fmov w9, s17 +; CHECK-NEXT: fmov x10, d16 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB2_3 +; CHECK-NEXT: .LBB2_19: // %cond.store3 +; CHECK-NEXT: mov z16.b, z0.b[2] +; CHECK-NEXT: fmov x10, d7 +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB2_4 +; CHECK-NEXT: .LBB2_20: // %cond.store5 +; CHECK-NEXT: mov z16.b, z0.b[3] +; CHECK-NEXT: mov z7.d, z7.d[1] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: fmov x10, d7 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #4, .LBB2_5 +; CHECK-NEXT: .LBB2_21: // %cond.store7 +; CHECK-NEXT: mov z7.b, z0.b[4] +; CHECK-NEXT: fmov x10, d6 +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #5, .LBB2_6 +; CHECK-NEXT: .LBB2_22: // %cond.store9 +; CHECK-NEXT: mov z7.b, z0.b[5] +; CHECK-NEXT: mov z6.d, z6.d[1] +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: fmov x10, d6 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #6, .LBB2_7 +; CHECK-NEXT: .LBB2_23: // %cond.store11 +; CHECK-NEXT: mov z6.b, z0.b[6] +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #7, .LBB2_8 +; CHECK-NEXT: .LBB2_24: // %cond.store13 +; CHECK-NEXT: mov z6.b, z0.b[7] +; CHECK-NEXT: mov z5.d, z5.d[1] +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #8, .LBB2_9 +; CHECK-NEXT: .LBB2_25: // %cond.store15 +; CHECK-NEXT: mov z5.b, z0.b[8] +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #9, .LBB2_10 +; CHECK-NEXT: .LBB2_26: // %cond.store17 +; CHECK-NEXT: mov z5.b, z0.b[9] +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #10, .LBB2_11 +; CHECK-NEXT: .LBB2_27: // %cond.store19 +; CHECK-NEXT: mov z4.b, z0.b[10] +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #11, .LBB2_12 +; CHECK-NEXT: .LBB2_28: // %cond.store21 +; CHECK-NEXT: mov z4.b, z0.b[11] +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #12, .LBB2_13 +; CHECK-NEXT: .LBB2_29: // %cond.store23 +; CHECK-NEXT: mov z3.b, z0.b[12] +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #13, .LBB2_14 +; CHECK-NEXT: .LBB2_30: // %cond.store25 +; CHECK-NEXT: mov z3.b, z0.b[13] +; CHECK-NEXT: mov 
z2.d, z2.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #14, .LBB2_15 +; CHECK-NEXT: .LBB2_31: // %cond.store27 +; CHECK-NEXT: mov z2.b, z0.b[14] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #15, .LBB2_16 +; CHECK-NEXT: .LBB2_32: // %cond.store29 +; CHECK-NEXT: mov z0.b, z0.b[15] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <16 x i8>, <16 x i8>* %a + %ptrs = load <16 x i8*>, <16 x i8*>* %b + %mask = icmp eq <16 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v16i8(<16 x i8> %vals, <16 x i8*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v32i8(<32 x i8>* %a, <32 x i8*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI3_0 +; CHECK-NEXT: ptrue p0.b, vl16 +; CHECK-NEXT: ldp q3, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0] +; CHECK-NEXT: cmpeq p1.b, p0/z, z0.b, z1.b +; CHECK-NEXT: cmpeq p0.b, p0/z, z3.b, z1.b +; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z4.b, z2.b[1] +; CHECK-NEXT: mov z5.b, z2.b[2] +; CHECK-NEXT: fmov w8, s4 +; CHECK-NEXT: mov z6.b, z2.b[3] +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s6 +; CHECK-NEXT: lsl w9, w9, #16 +; CHECK-NEXT: mov z7.b, z2.b[4] +; CHECK-NEXT: bfi w9, w8, #17, #1 +; CHECK-NEXT: mov z17.b, z2.b[6] +; CHECK-NEXT: bfi w9, w10, #18, #1 +; CHECK-NEXT: mov z16.b, z2.b[5] +; CHECK-NEXT: fmov w8, s7 +; CHECK-NEXT: bfi w9, w11, #19, #1 +; CHECK-NEXT: fmov w11, s17 +; CHECK-NEXT: fmov w10, s16 +; CHECK-NEXT: mov z18.b, z2.b[7] +; CHECK-NEXT: mov z19.b, z2.b[8] +; CHECK-NEXT: bfi w9, w8, #20, #1 +; CHECK-NEXT: mov z20.b, z2.b[9] +; CHECK-NEXT: and w8, w11, #0x1 +; CHECK-NEXT: bfi w9, w10, #21, #1 +; CHECK-NEXT: fmov w10, s19 +; CHECK-NEXT: fmov w11, s20 +; CHECK-NEXT: orr w8, w9, w8, lsl #22 +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: mov z21.b, z2.b[10] +; CHECK-NEXT: mov z4.b, z2.b[11] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: mov z22.b, z2.b[12] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z5.b, z2.b[13] +; CHECK-NEXT: mov z6.b, z2.b[14] +; CHECK-NEXT: mov z2.b, z2.b[15] +; CHECK-NEXT: orr w8, w8, w9, lsl #23 +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #24 +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: orr w8, w8, w9, lsl #25 +; CHECK-NEXT: fmov w9, s21 +; CHECK-NEXT: fmov w11, s22 +; CHECK-NEXT: mov z4.b, z1.b[1] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: mov z7.b, z1.b[6] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z16.b, z1.b[7] +; CHECK-NEXT: mov z17.b, z1.b[8] +; CHECK-NEXT: mov z18.b, z1.b[9] +; CHECK-NEXT: orr w8, w8, w9, lsl #26 +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #27 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: orr w8, w8, w9, lsl #28 +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: fmov w11, s1 +; CHECK-NEXT: mov z5.b, z1.b[4] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: mov z6.b, z1.b[5] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: mov z19.b, z1.b[10] +; CHECK-NEXT: mov z20.b, z1.b[11] +; CHECK-NEXT: mov z21.b, z1.b[12] +; CHECK-NEXT: orr w8, w8, w9, lsl #29 +; CHECK-NEXT: fmov w9, s2 +; 
CHECK-NEXT: orr w8, w8, w10, lsl #30 +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: mov z2.b, z1.b[2] +; CHECK-NEXT: mov z4.b, z1.b[3] +; CHECK-NEXT: fmov w12, s2 +; CHECK-NEXT: orr w8, w8, w9, lsl #31 +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: fmov w11, s5 +; CHECK-NEXT: bfi w9, w10, #1, #1 +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: bfi w9, w12, #2, #1 +; CHECK-NEXT: fmov w12, s6 +; CHECK-NEXT: mov z22.b, z1.b[13] +; CHECK-NEXT: mov z23.b, z1.b[14] +; CHECK-NEXT: bfi w9, w10, #3, #1 +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: bfi w9, w11, #4, #1 +; CHECK-NEXT: fmov w11, s16 +; CHECK-NEXT: bfi w9, w12, #5, #1 +; CHECK-NEXT: mov z24.b, z1.b[15] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: orr w9, w9, w10, lsl #6 +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: orr w9, w9, w11, lsl #7 +; CHECK-NEXT: fmov w11, s18 +; CHECK-NEXT: ldp q2, q1, [x1, #224] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: orr w9, w9, w10, lsl #8 +; CHECK-NEXT: fmov w10, s19 +; CHECK-NEXT: orr w9, w9, w11, lsl #9 +; CHECK-NEXT: fmov w11, s20 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: orr w9, w9, w10, lsl #10 +; CHECK-NEXT: fmov w10, s21 +; CHECK-NEXT: orr w9, w9, w11, lsl #11 +; CHECK-NEXT: fmov w11, s22 +; CHECK-NEXT: ldp q5, q4, [x1, #192] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: orr w9, w9, w10, lsl #12 +; CHECK-NEXT: fmov w10, s23 +; CHECK-NEXT: orr w9, w9, w11, lsl #13 +; CHECK-NEXT: fmov w11, s24 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: ldp q7, q6, [x1, #160] +; CHECK-NEXT: orr w9, w9, w10, lsl #14 +; CHECK-NEXT: orr w9, w9, w11, lsl #15 +; CHECK-NEXT: and w9, w9, #0xffff +; CHECK-NEXT: orr w8, w9, w8 +; CHECK-NEXT: ldp q17, q16, [x1, #128] +; CHECK-NEXT: ldp q19, q18, [x1, #96] +; CHECK-NEXT: ldp q21, q20, [x1, #64] +; CHECK-NEXT: ldp q23, q22, [x1, #32] +; CHECK-NEXT: ldp q25, q24, [x1] +; CHECK-NEXT: tbnz w8, #0, .LBB3_33 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB3_34 +; CHECK-NEXT: .LBB3_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB3_35 +; CHECK-NEXT: .LBB3_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB3_36 +; CHECK-NEXT: .LBB3_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB3_37 +; CHECK-NEXT: .LBB3_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB3_38 +; CHECK-NEXT: .LBB3_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB3_39 +; CHECK-NEXT: .LBB3_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB3_40 +; CHECK-NEXT: .LBB3_8: // %else14 +; CHECK-NEXT: tbnz w8, #8, .LBB3_41 +; CHECK-NEXT: .LBB3_9: // %else16 +; CHECK-NEXT: tbnz w8, #9, .LBB3_42 +; CHECK-NEXT: .LBB3_10: // %else18 +; CHECK-NEXT: tbnz w8, #10, .LBB3_43 +; CHECK-NEXT: .LBB3_11: // %else20 +; CHECK-NEXT: tbnz w8, #11, .LBB3_44 +; CHECK-NEXT: .LBB3_12: // %else22 +; CHECK-NEXT: tbnz w8, #12, .LBB3_45 +; CHECK-NEXT: .LBB3_13: // %else24 +; CHECK-NEXT: tbnz w8, #13, .LBB3_46 +; CHECK-NEXT: .LBB3_14: // %else26 +; CHECK-NEXT: tbnz w8, #14, .LBB3_47 +; CHECK-NEXT: .LBB3_15: // %else28 +; CHECK-NEXT: tbnz w8, #15, .LBB3_48 +; CHECK-NEXT: .LBB3_16: // %else30 +; CHECK-NEXT: tbnz w8, #16, .LBB3_49 +; CHECK-NEXT: .LBB3_17: // %else32 +; CHECK-NEXT: tbnz w8, #17, .LBB3_50 +; CHECK-NEXT: .LBB3_18: // %else34 +; CHECK-NEXT: tbnz w8, #18, .LBB3_51 +; CHECK-NEXT: .LBB3_19: // %else36 +; CHECK-NEXT: tbnz w8, #19, .LBB3_52 +; CHECK-NEXT: .LBB3_20: // %else38 +; CHECK-NEXT: tbnz w8, #20, .LBB3_53 +; CHECK-NEXT: .LBB3_21: // %else40 +; CHECK-NEXT: tbnz w8, #21, .LBB3_54 +; CHECK-NEXT: .LBB3_22: // 
%else42 +; CHECK-NEXT: tbnz w8, #22, .LBB3_55 +; CHECK-NEXT: .LBB3_23: // %else44 +; CHECK-NEXT: tbnz w8, #23, .LBB3_56 +; CHECK-NEXT: .LBB3_24: // %else46 +; CHECK-NEXT: tbnz w8, #24, .LBB3_57 +; CHECK-NEXT: .LBB3_25: // %else48 +; CHECK-NEXT: tbnz w8, #25, .LBB3_58 +; CHECK-NEXT: .LBB3_26: // %else50 +; CHECK-NEXT: tbnz w8, #26, .LBB3_59 +; CHECK-NEXT: .LBB3_27: // %else52 +; CHECK-NEXT: tbnz w8, #27, .LBB3_60 +; CHECK-NEXT: .LBB3_28: // %else54 +; CHECK-NEXT: tbnz w8, #28, .LBB3_61 +; CHECK-NEXT: .LBB3_29: // %else56 +; CHECK-NEXT: tbnz w8, #29, .LBB3_62 +; CHECK-NEXT: .LBB3_30: // %else58 +; CHECK-NEXT: tbnz w8, #30, .LBB3_63 +; CHECK-NEXT: .LBB3_31: // %else60 +; CHECK-NEXT: tbnz w8, #31, .LBB3_64 +; CHECK-NEXT: .LBB3_32: // %else62 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB3_33: // %cond.store +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d25 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB3_2 +; CHECK-NEXT: .LBB3_34: // %cond.store1 +; CHECK-NEXT: mov z26.b, z3.b[1] +; CHECK-NEXT: mov z25.d, z25.d[1] +; CHECK-NEXT: fmov w9, s26 +; CHECK-NEXT: fmov x10, d25 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB3_3 +; CHECK-NEXT: .LBB3_35: // %cond.store3 +; CHECK-NEXT: mov z25.b, z3.b[2] +; CHECK-NEXT: fmov x10, d24 +; CHECK-NEXT: fmov w9, s25 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB3_4 +; CHECK-NEXT: .LBB3_36: // %cond.store5 +; CHECK-NEXT: mov z25.b, z3.b[3] +; CHECK-NEXT: mov z24.d, z24.d[1] +; CHECK-NEXT: fmov w9, s25 +; CHECK-NEXT: fmov x10, d24 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #4, .LBB3_5 +; CHECK-NEXT: .LBB3_37: // %cond.store7 +; CHECK-NEXT: mov z24.b, z3.b[4] +; CHECK-NEXT: fmov x10, d23 +; CHECK-NEXT: fmov w9, s24 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #5, .LBB3_6 +; CHECK-NEXT: .LBB3_38: // %cond.store9 +; CHECK-NEXT: mov z24.b, z3.b[5] +; CHECK-NEXT: mov z23.d, z23.d[1] +; CHECK-NEXT: fmov w9, s24 +; CHECK-NEXT: fmov x10, d23 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #6, .LBB3_7 +; CHECK-NEXT: .LBB3_39: // %cond.store11 +; CHECK-NEXT: mov z23.b, z3.b[6] +; CHECK-NEXT: fmov x10, d22 +; CHECK-NEXT: fmov w9, s23 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #7, .LBB3_8 +; CHECK-NEXT: .LBB3_40: // %cond.store13 +; CHECK-NEXT: mov z23.b, z3.b[7] +; CHECK-NEXT: mov z22.d, z22.d[1] +; CHECK-NEXT: fmov w9, s23 +; CHECK-NEXT: fmov x10, d22 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #8, .LBB3_9 +; CHECK-NEXT: .LBB3_41: // %cond.store15 +; CHECK-NEXT: mov z22.b, z3.b[8] +; CHECK-NEXT: fmov x10, d21 +; CHECK-NEXT: fmov w9, s22 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #9, .LBB3_10 +; CHECK-NEXT: .LBB3_42: // %cond.store17 +; CHECK-NEXT: mov z22.b, z3.b[9] +; CHECK-NEXT: mov z21.d, z21.d[1] +; CHECK-NEXT: fmov w9, s22 +; CHECK-NEXT: fmov x10, d21 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #10, .LBB3_11 +; CHECK-NEXT: .LBB3_43: // %cond.store19 +; CHECK-NEXT: mov z21.b, z3.b[10] +; CHECK-NEXT: fmov x10, d20 +; CHECK-NEXT: fmov w9, s21 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #11, .LBB3_12 +; CHECK-NEXT: .LBB3_44: // %cond.store21 +; CHECK-NEXT: mov z21.b, z3.b[11] +; CHECK-NEXT: mov z20.d, z20.d[1] +; CHECK-NEXT: fmov w9, s21 +; CHECK-NEXT: fmov x10, d20 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #12, .LBB3_13 +; CHECK-NEXT: .LBB3_45: // %cond.store23 +; CHECK-NEXT: mov z20.b, z3.b[12] +; CHECK-NEXT: fmov x10, d19 +; CHECK-NEXT: fmov w9, s20 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #13, .LBB3_14 +; 
CHECK-NEXT: .LBB3_46: // %cond.store25 +; CHECK-NEXT: mov z20.b, z3.b[13] +; CHECK-NEXT: mov z19.d, z19.d[1] +; CHECK-NEXT: fmov w9, s20 +; CHECK-NEXT: fmov x10, d19 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #14, .LBB3_15 +; CHECK-NEXT: .LBB3_47: // %cond.store27 +; CHECK-NEXT: mov z19.b, z3.b[14] +; CHECK-NEXT: fmov x10, d18 +; CHECK-NEXT: fmov w9, s19 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #15, .LBB3_16 +; CHECK-NEXT: .LBB3_48: // %cond.store29 +; CHECK-NEXT: mov z3.b, z3.b[15] +; CHECK-NEXT: mov z18.d, z18.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d18 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #16, .LBB3_17 +; CHECK-NEXT: .LBB3_49: // %cond.store31 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d17 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #17, .LBB3_18 +; CHECK-NEXT: .LBB3_50: // %cond.store33 +; CHECK-NEXT: mov z3.b, z0.b[1] +; CHECK-NEXT: mov z17.d, z17.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d17 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #18, .LBB3_19 +; CHECK-NEXT: .LBB3_51: // %cond.store35 +; CHECK-NEXT: mov z3.b, z0.b[2] +; CHECK-NEXT: fmov x10, d16 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #19, .LBB3_20 +; CHECK-NEXT: .LBB3_52: // %cond.store37 +; CHECK-NEXT: mov z3.b, z0.b[3] +; CHECK-NEXT: mov z16.d, z16.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d16 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #20, .LBB3_21 +; CHECK-NEXT: .LBB3_53: // %cond.store39 +; CHECK-NEXT: mov z3.b, z0.b[4] +; CHECK-NEXT: fmov x10, d7 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #21, .LBB3_22 +; CHECK-NEXT: .LBB3_54: // %cond.store41 +; CHECK-NEXT: mov z3.b, z0.b[5] +; CHECK-NEXT: mov z7.d, z7.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d7 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #22, .LBB3_23 +; CHECK-NEXT: .LBB3_55: // %cond.store43 +; CHECK-NEXT: mov z3.b, z0.b[6] +; CHECK-NEXT: fmov x10, d6 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #23, .LBB3_24 +; CHECK-NEXT: .LBB3_56: // %cond.store45 +; CHECK-NEXT: mov z3.b, z0.b[7] +; CHECK-NEXT: mov z6.d, z6.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d6 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #24, .LBB3_25 +; CHECK-NEXT: .LBB3_57: // %cond.store47 +; CHECK-NEXT: mov z3.b, z0.b[8] +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #25, .LBB3_26 +; CHECK-NEXT: .LBB3_58: // %cond.store49 +; CHECK-NEXT: mov z3.b, z0.b[9] +; CHECK-NEXT: mov z5.d, z5.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #26, .LBB3_27 +; CHECK-NEXT: .LBB3_59: // %cond.store51 +; CHECK-NEXT: mov z3.b, z0.b[10] +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #27, .LBB3_28 +; CHECK-NEXT: .LBB3_60: // %cond.store53 +; CHECK-NEXT: mov z3.b, z0.b[11] +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #28, .LBB3_29 +; CHECK-NEXT: .LBB3_61: // %cond.store55 +; CHECK-NEXT: mov z3.b, z0.b[12] +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #29, .LBB3_30 +; CHECK-NEXT: .LBB3_62: // %cond.store57 +; CHECK-NEXT: mov z3.b, z0.b[13] +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov w9, s3 +; 
CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #30, .LBB3_31 +; CHECK-NEXT: .LBB3_63: // %cond.store59 +; CHECK-NEXT: mov z2.b, z0.b[14] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strb w9, [x10] +; CHECK-NEXT: tbz w8, #31, .LBB3_32 +; CHECK-NEXT: .LBB3_64: // %cond.store61 +; CHECK-NEXT: mov z0.b, z0.b[15] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strb w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <32 x i8>, <32 x i8>* %a + %ptrs = load <32 x i8*>, <32 x i8*>* %b + %mask = icmp eq <32 x i8> %vals, zeroinitializer + call void @llvm.masked.scatter.v32i8(<32 x i8> %vals, <32 x i8*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +define void @masked_scatter_v2i16(<2 x i16>* %a, <2 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w9, [x0, #2] +; CHECK-NEXT: adrp x8, .LCPI4_0 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w9, [sp, #4] +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI4_0] +; CHECK-NEXT: ldrh w8, [x0] +; CHECK-NEXT: str w8, [sp] +; CHECK-NEXT: ldr d0, [sp] +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbnz w9, #0, .LBB4_3 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB4_4 +; CHECK-NEXT: .LBB4_2: // %else2 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB4_3: // %cond.store +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB4_2 +; CHECK-NEXT: .LBB4_4: // %cond.store1 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strh w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <2 x i16>, <2 x i16>* %a + %ptrs = load <2 x i16*>, <2 x i16*>* %b + %mask = icmp eq <2 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v2i16(<2 x i16> %vals, <2 x i16*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4i16(<4 x i16>* %a, <4 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI5_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI5_0] +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.h, z1.h[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z1.h[3] +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: ldp q2, q1, [x1] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbnz w10, #0, .LBB5_5 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB5_6 +; CHECK-NEXT: .LBB5_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB5_7 +; CHECK-NEXT: .LBB5_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB5_8 +; CHECK-NEXT: .LBB5_4: // %else6 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB5_5: // %cond.store +; CHECK-NEXT: 
fmov w9, s0 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB5_2 +; CHECK-NEXT: .LBB5_6: // %cond.store1 +; CHECK-NEXT: mov z3.h, z0.h[1] +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB5_3 +; CHECK-NEXT: .LBB5_7: // %cond.store3 +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB5_4 +; CHECK-NEXT: .LBB5_8: // %cond.store5 +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strh w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <4 x i16>, <4 x i16>* %a + %ptrs = load <4 x i16*>, <4 x i16*>* %b + %mask = icmp eq <4 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i16(<4 x i16> %vals, <4 x i16*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8i16(<8 x i16>* %a, <8 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI6_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI6_0] +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: mov z2.b, z1.b[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z3.b, z1.b[2] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z4.b, z1.b[3] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: mov z5.b, z1.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: mov z6.b, z1.b[5] +; CHECK-NEXT: mov z2.b, z1.b[6] +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z1.b, z1.b[7] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: ldp q2, q1, [x1, #32] +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: ldp q4, q3, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB6_9 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB6_10 +; CHECK-NEXT: .LBB6_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB6_11 +; CHECK-NEXT: .LBB6_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB6_12 +; CHECK-NEXT: .LBB6_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB6_13 +; CHECK-NEXT: .LBB6_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB6_14 +; CHECK-NEXT: .LBB6_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB6_15 +; CHECK-NEXT: .LBB6_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB6_16 +; CHECK-NEXT: .LBB6_8: // %else14 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB6_9: // %cond.store +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB6_2 +; CHECK-NEXT: .LBB6_10: // %cond.store1 +; CHECK-NEXT: mov z5.h, z0.h[1] +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB6_3 +; CHECK-NEXT: .LBB6_11: // %cond.store3 +; CHECK-NEXT: mov z4.h, z0.h[2] +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB6_4 +; 
CHECK-NEXT: .LBB6_12: // %cond.store5 +; CHECK-NEXT: mov z4.h, z0.h[3] +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #4, .LBB6_5 +; CHECK-NEXT: .LBB6_13: // %cond.store7 +; CHECK-NEXT: mov z3.h, z0.h[4] +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #5, .LBB6_6 +; CHECK-NEXT: .LBB6_14: // %cond.store9 +; CHECK-NEXT: mov z3.h, z0.h[5] +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #6, .LBB6_7 +; CHECK-NEXT: .LBB6_15: // %cond.store11 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #7, .LBB6_8 +; CHECK-NEXT: .LBB6_16: // %cond.store13 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strh w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <8 x i16>, <8 x i16>* %a + %ptrs = load <8 x i16*>, <8 x i16*>* %b + %mask = icmp eq <8 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v8i16(<8 x i16> %vals, <8 x i16*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16i16(<16 x i16>* %a, <16 x i16*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16i16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI7_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q2, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI7_0] +; CHECK-NEXT: cmpeq p1.h, p0/z, z2.h, z1.h +; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z3.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z1.b, z3.b, z3.b +; CHECK-NEXT: mov z3.b, z1.b[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z4.b, z1.b[2] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z5.b, z1.b[3] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: mov z6.b, z1.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s5 +; CHECK-NEXT: mov z7.b, z1.b[5] +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: mov z16.b, z1.b[6] +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: mov z17.b, z1.b[7] +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w11, s16 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: mov z3.b, z1.b[1] +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: mov z4.b, z1.b[2] +; CHECK-NEXT: orr w9, w9, w11, lsl #6 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: orr w8, w9, w8, lsl #7 +; CHECK-NEXT: orr w8, w8, w10, lsl #8 +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: mov z5.b, z1.b[3] +; CHECK-NEXT: mov z6.b, z1.b[4] +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: mov z7.b, z1.b[5] +; CHECK-NEXT: mov z16.b, z1.b[6] +; CHECK-NEXT: mov z17.b, z1.b[7] +; CHECK-NEXT: orr w8, w8, w9, lsl #9 +; CHECK-NEXT: and w9, w10, #0x1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: orr w8, w8, w9, lsl #10 +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: ldp q3, q1, [x1, #96] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #11 +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: orr w8, w8, w9, lsl #12 +; 
CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #13 +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: ldp q5, q4, [x1, #64] +; CHECK-NEXT: orr w8, w8, w9, lsl #14 +; CHECK-NEXT: orr w9, w8, w10, lsl #15 +; CHECK-NEXT: and w8, w9, #0xffff +; CHECK-NEXT: ldp q7, q6, [x1, #32] +; CHECK-NEXT: ldp q17, q16, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB7_17 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB7_18 +; CHECK-NEXT: .LBB7_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB7_19 +; CHECK-NEXT: .LBB7_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB7_20 +; CHECK-NEXT: .LBB7_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB7_21 +; CHECK-NEXT: .LBB7_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB7_22 +; CHECK-NEXT: .LBB7_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB7_23 +; CHECK-NEXT: .LBB7_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB7_24 +; CHECK-NEXT: .LBB7_8: // %else14 +; CHECK-NEXT: tbnz w8, #8, .LBB7_25 +; CHECK-NEXT: .LBB7_9: // %else16 +; CHECK-NEXT: tbnz w8, #9, .LBB7_26 +; CHECK-NEXT: .LBB7_10: // %else18 +; CHECK-NEXT: tbnz w8, #10, .LBB7_27 +; CHECK-NEXT: .LBB7_11: // %else20 +; CHECK-NEXT: tbnz w8, #11, .LBB7_28 +; CHECK-NEXT: .LBB7_12: // %else22 +; CHECK-NEXT: tbnz w8, #12, .LBB7_29 +; CHECK-NEXT: .LBB7_13: // %else24 +; CHECK-NEXT: tbnz w8, #13, .LBB7_30 +; CHECK-NEXT: .LBB7_14: // %else26 +; CHECK-NEXT: tbnz w8, #14, .LBB7_31 +; CHECK-NEXT: .LBB7_15: // %else28 +; CHECK-NEXT: tbnz w8, #15, .LBB7_32 +; CHECK-NEXT: .LBB7_16: // %else30 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB7_17: // %cond.store +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov x10, d17 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB7_2 +; CHECK-NEXT: .LBB7_18: // %cond.store1 +; CHECK-NEXT: mov z18.h, z2.h[1] +; CHECK-NEXT: mov z17.d, z17.d[1] +; CHECK-NEXT: fmov w9, s18 +; CHECK-NEXT: fmov x10, d17 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB7_3 +; CHECK-NEXT: .LBB7_19: // %cond.store3 +; CHECK-NEXT: mov z17.h, z2.h[2] +; CHECK-NEXT: fmov x10, d16 +; CHECK-NEXT: fmov w9, s17 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB7_4 +; CHECK-NEXT: .LBB7_20: // %cond.store5 +; CHECK-NEXT: mov z17.h, z2.h[3] +; CHECK-NEXT: mov z16.d, z16.d[1] +; CHECK-NEXT: fmov w9, s17 +; CHECK-NEXT: fmov x10, d16 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #4, .LBB7_5 +; CHECK-NEXT: .LBB7_21: // %cond.store7 +; CHECK-NEXT: mov z16.h, z2.h[4] +; CHECK-NEXT: fmov x10, d7 +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #5, .LBB7_6 +; CHECK-NEXT: .LBB7_22: // %cond.store9 +; CHECK-NEXT: mov z16.h, z2.h[5] +; CHECK-NEXT: mov z7.d, z7.d[1] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: fmov x10, d7 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #6, .LBB7_7 +; CHECK-NEXT: .LBB7_23: // %cond.store11 +; CHECK-NEXT: mov z7.h, z2.h[6] +; CHECK-NEXT: fmov x10, d6 +; CHECK-NEXT: fmov w9, s7 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #7, .LBB7_8 +; CHECK-NEXT: .LBB7_24: // %cond.store13 +; CHECK-NEXT: mov z2.h, z2.h[7] +; CHECK-NEXT: mov z6.d, z6.d[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov x10, d6 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #8, .LBB7_9 +; CHECK-NEXT: .LBB7_25: // %cond.store15 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #9, .LBB7_10 +; CHECK-NEXT: .LBB7_26: // %cond.store17 +; CHECK-NEXT: mov z2.h, z0.h[1] +; CHECK-NEXT: mov z5.d, z5.d[1] +; CHECK-NEXT: fmov 
w9, s2 +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #10, .LBB7_11 +; CHECK-NEXT: .LBB7_27: // %cond.store19 +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #11, .LBB7_12 +; CHECK-NEXT: .LBB7_28: // %cond.store21 +; CHECK-NEXT: mov z2.h, z0.h[3] +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #12, .LBB7_13 +; CHECK-NEXT: .LBB7_29: // %cond.store23 +; CHECK-NEXT: mov z2.h, z0.h[4] +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #13, .LBB7_14 +; CHECK-NEXT: .LBB7_30: // %cond.store25 +; CHECK-NEXT: mov z2.h, z0.h[5] +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #14, .LBB7_15 +; CHECK-NEXT: .LBB7_31: // %cond.store27 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: strh w9, [x10] +; CHECK-NEXT: tbz w8, #15, .LBB7_16 +; CHECK-NEXT: .LBB7_32: // %cond.store29 +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: strh w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <16 x i16>, <16 x i16>* %a + %ptrs = load <16 x i16*>, <16 x i16*>* %b + %mask = icmp eq <16 x i16> %vals, zeroinitializer + call void @llvm.masked.scatter.v16i16(<16 x i16> %vals, <16 x i16*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v2i32(<2 x i32>* %a, <2 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI8_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI8_0] +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbnz w9, #0, .LBB8_3 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB8_4 +; CHECK-NEXT: .LBB8_2: // %else2 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB8_3: // %cond.store +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB8_2 +; CHECK-NEXT: .LBB8_4: // %cond.store1 +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <2 x i32>, <2 x i32>* %a + %ptrs = load <2 x i32*>, <2 x i32*>* %b + %mask = icmp eq <2 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v2i32(<2 x i32> %vals, <2 x i32*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4i32(<4 x i32>* %a, <4 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI9_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI9_0] +; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 
z1.h, z1.h, z1.h +; CHECK-NEXT: mov z2.h, z1.h[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z1.h[3] +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: ldp q2, q1, [x1] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbnz w10, #0, .LBB9_5 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB9_6 +; CHECK-NEXT: .LBB9_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB9_7 +; CHECK-NEXT: .LBB9_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB9_8 +; CHECK-NEXT: .LBB9_4: // %else6 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB9_5: // %cond.store +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB9_2 +; CHECK-NEXT: .LBB9_6: // %cond.store1 +; CHECK-NEXT: mov z3.s, z0.s[1] +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB9_3 +; CHECK-NEXT: .LBB9_7: // %cond.store3 +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB9_4 +; CHECK-NEXT: .LBB9_8: // %cond.store5 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <4 x i32>, <4 x i32>* %a + %ptrs = load <4 x i32*>, <4 x i32*>* %b + %mask = icmp eq <4 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i32(<4 x i32> %vals, <4 x i32*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI10_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q2, q0, [x0] +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI10_0] +; CHECK-NEXT: cmpeq p1.s, p0/z, z0.s, z1.s +; CHECK-NEXT: cmpeq p0.s, p0/z, z2.s, z1.s +; CHECK-NEXT: mov z1.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z3.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: splice z3.h, p1, z3.h, z1.h +; CHECK-NEXT: uzp1 z1.b, z3.b, z3.b +; CHECK-NEXT: mov z3.b, z1.b[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z4.b, z1.b[2] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z5.b, z1.b[3] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: mov z6.b, z1.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s5 +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: mov z7.b, z1.b[5] +; CHECK-NEXT: mov z3.b, z1.b[6] +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z1.b, z1.b[7] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: ldp q3, q1, [x1, #32] +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: ldp q5, q4, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB10_9 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB10_10 +; CHECK-NEXT: .LBB10_2: // %else2 +; CHECK-NEXT: tbnz w8, 
#2, .LBB10_11 +; CHECK-NEXT: .LBB10_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB10_12 +; CHECK-NEXT: .LBB10_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB10_13 +; CHECK-NEXT: .LBB10_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB10_14 +; CHECK-NEXT: .LBB10_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB10_15 +; CHECK-NEXT: .LBB10_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB10_16 +; CHECK-NEXT: .LBB10_8: // %else14 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB10_9: // %cond.store +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB10_2 +; CHECK-NEXT: .LBB10_10: // %cond.store1 +; CHECK-NEXT: mov z6.s, z2.s[1] +; CHECK-NEXT: mov z5.d, z5.d[1] +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: fmov x10, d5 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB10_3 +; CHECK-NEXT: .LBB10_11: // %cond.store3 +; CHECK-NEXT: mov z5.s, z2.s[2] +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: fmov w9, s5 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB10_4 +; CHECK-NEXT: .LBB10_12: // %cond.store5 +; CHECK-NEXT: mov z2.s, z2.s[3] +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov x10, d4 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #4, .LBB10_5 +; CHECK-NEXT: .LBB10_13: // %cond.store7 +; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #5, .LBB10_6 +; CHECK-NEXT: .LBB10_14: // %cond.store9 +; CHECK-NEXT: mov z2.s, z0.s[1] +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #6, .LBB10_7 +; CHECK-NEXT: .LBB10_15: // %cond.store11 +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: str w9, [x10] +; CHECK-NEXT: tbz w8, #7, .LBB10_8 +; CHECK-NEXT: .LBB10_16: // %cond.store13 +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov w8, s0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str w8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <8 x i32>, <8 x i32>* %a + %ptrs = load <8 x i32*>, <8 x i32*>* %b + %mask = icmp eq <8 x i32> %vals, zeroinitializer + call void @llvm.masked.scatter.v8i32(<8 x i32> %vals, <8 x i32*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_scatter_v1i64(<1 x i64>* %a, <1 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: cbnz x8, .LBB11_2 +; CHECK-NEXT: // %bb.1: // %cond.store +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str x8, [x9] +; CHECK-NEXT: .LBB11_2: // %else +; CHECK-NEXT: ret + %vals = load <1 x i64>, <1 x i64>* %a + %ptrs = load <1 x i64*>, <1 x i64*>* %b + %mask = icmp eq <1 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v1i64(<1 x i64> %vals, <1 x i64*> %ptrs, i32 8, <1 x i1> %mask) + ret void +} + +define void @masked_scatter_v2i64(<2 x i64>* %a, <2 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI12_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI12_0] +; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; 
CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbnz w9, #0, .LBB12_3 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB12_4 +; CHECK-NEXT: .LBB12_2: // %else2 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB12_3: // %cond.store +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: str x9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB12_2 +; CHECK-NEXT: .LBB12_4: // %cond.store1 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str x8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <2 x i64>, <2 x i64>* %a + %ptrs = load <2 x i64*>, <2 x i64*>* %b + %mask = icmp eq <2 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v2i64(<2 x i64> %vals, <2 x i64*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4i64(<4 x i64>* %a, <4 x i64*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4i64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI13_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI13_0] +; CHECK-NEXT: cmpeq p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: cmpeq p0.d, p0/z, z1.d, z2.d +; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z3.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: splice z3.s, p0, z3.s, z2.s +; CHECK-NEXT: uzp1 z2.h, z3.h, z3.h +; CHECK-NEXT: mov z3.h, z2.h[1] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.h, z2.h[2] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z3.h, z2.h[3] +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbnz w10, #0, .LBB13_5 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB13_6 +; CHECK-NEXT: .LBB13_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB13_7 +; CHECK-NEXT: .LBB13_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB13_8 +; CHECK-NEXT: .LBB13_4: // %else6 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB13_5: // %cond.store +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: str x9, [x10] +; CHECK-NEXT: tbz w8, #1, .LBB13_2 +; CHECK-NEXT: .LBB13_6: // %cond.store1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: fmov x10, d3 +; CHECK-NEXT: str x9, [x10] +; CHECK-NEXT: tbz w8, #2, .LBB13_3 +; CHECK-NEXT: .LBB13_7: // %cond.store3 +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: fmov x10, d2 +; CHECK-NEXT: str x9, [x10] +; CHECK-NEXT: tbz w8, #3, .LBB13_4 +; CHECK-NEXT: .LBB13_8: // %cond.store5 +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: mov z1.d, z2.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str x8, [x9] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <4 x i64>, <4 x i64>* %a + %ptrs = load <4 x i64*>, <4 x i64*>* %b + %mask = icmp eq <4 x i64> %vals, zeroinitializer + call void @llvm.masked.scatter.v4i64(<4 x i64> %vals, <4 x i64*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v2f16(<2 x half>* %a, <2 x half*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2f16: +; 
CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI14_0 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI14_0] +; CHECK-NEXT: fcmeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uunpklo z1.s, z1.h +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbnz w9, #0, .LBB14_3 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB14_4 +; CHECK-NEXT: .LBB14_2: // %else2 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB14_3: // %cond.store +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str h0, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB14_2 +; CHECK-NEXT: .LBB14_4: // %cond.store1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov z0.h, z0.h[1] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str h0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <2 x half>, <2 x half>* %a + %ptrs = load <2 x half*>, <2 x half*>* %b + %mask = fcmp oeq <2 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v2f16(<2 x half> %vals, <2 x half*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4f16(<4 x half>* %a, <4 x half*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI15_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI15_0] +; CHECK-NEXT: fcmeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.h, z1.h[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z1.h[3] +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: ldp q2, q1, [x1] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbnz w10, #0, .LBB15_5 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB15_6 +; CHECK-NEXT: .LBB15_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB15_7 +; CHECK-NEXT: .LBB15_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB15_8 +; CHECK-NEXT: .LBB15_4: // %else6 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB15_5: // %cond.store +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: str h0, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB15_2 +; CHECK-NEXT: .LBB15_6: // %cond.store1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, z0.h[1] +; CHECK-NEXT: str h2, [x9] +; CHECK-NEXT: tbz w8, #2, .LBB15_3 +; CHECK-NEXT: .LBB15_7: // %cond.store3 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z2.h, z0.h[2] +; CHECK-NEXT: str h2, [x9] +; CHECK-NEXT: tbz w8, #3, .LBB15_4 +; CHECK-NEXT: .LBB15_8: // %cond.store5 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov z0.h, z0.h[3] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str h0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <4 x half>, <4 x half>* %a + %ptrs = load <4 x half*>, <4 x half*>* %b + %mask = fcmp oeq <4 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v4f16(<4 x half> %vals, <4 x half*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_scatter_v8f16(<8 x half>* %a, <8 x half*>* %b) #0 { +; CHECK-LABEL: 
masked_scatter_v8f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI16_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI16_0] +; CHECK-NEXT: fcmeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z1.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: mov z2.b, z1.b[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: mov z3.b, z1.b[2] +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z4.b, z1.b[3] +; CHECK-NEXT: fmov w10, s3 +; CHECK-NEXT: mov z5.b, z1.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s4 +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: mov z6.b, z1.b[5] +; CHECK-NEXT: mov z2.b, z1.b[6] +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z1.b, z1.b[7] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: ldp q2, q1, [x1, #32] +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: ldp q4, q3, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB16_9 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB16_10 +; CHECK-NEXT: .LBB16_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB16_11 +; CHECK-NEXT: .LBB16_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB16_12 +; CHECK-NEXT: .LBB16_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB16_13 +; CHECK-NEXT: .LBB16_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB16_14 +; CHECK-NEXT: .LBB16_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB16_15 +; CHECK-NEXT: .LBB16_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB16_16 +; CHECK-NEXT: .LBB16_8: // %else14 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB16_9: // %cond.store +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: str h0, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB16_2 +; CHECK-NEXT: .LBB16_10: // %cond.store1 +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: mov z4.h, z0.h[1] +; CHECK-NEXT: str h4, [x9] +; CHECK-NEXT: tbz w8, #2, .LBB16_3 +; CHECK-NEXT: .LBB16_11: // %cond.store3 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z4.h, z0.h[2] +; CHECK-NEXT: str h4, [x9] +; CHECK-NEXT: tbz w8, #3, .LBB16_4 +; CHECK-NEXT: .LBB16_12: // %cond.store5 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z3.h, z0.h[3] +; CHECK-NEXT: str h3, [x9] +; CHECK-NEXT: tbz w8, #4, .LBB16_5 +; CHECK-NEXT: .LBB16_13: // %cond.store7 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z3.h, z0.h[4] +; CHECK-NEXT: str h3, [x9] +; CHECK-NEXT: tbz w8, #5, .LBB16_6 +; CHECK-NEXT: .LBB16_14: // %cond.store9 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.h, z0.h[5] +; CHECK-NEXT: str h2, [x9] +; CHECK-NEXT: tbz w8, #6, .LBB16_7 +; CHECK-NEXT: .LBB16_15: // %cond.store11 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z2.h, z0.h[6] +; CHECK-NEXT: str h2, [x9] +; CHECK-NEXT: tbz w8, #7, .LBB16_8 +; CHECK-NEXT: .LBB16_16: // %cond.store13 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str h0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <8 x half>, <8 x half>* %a + %ptrs = load <8 x half*>, <8 x half*>* %b + %mask = fcmp oeq <8 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v8f16(<8 x half> %vals, <8 x half*> %ptrs, i32 8, 
<8 x i1> %mask) + ret void +} + +define void @masked_scatter_v16f16(<16 x half>* %a, <16 x half*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v16f16: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI17_0 +; CHECK-NEXT: ptrue p0.h, vl8 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI17_0] +; CHECK-NEXT: fcmeq p1.h, p0/z, z1.h, z2.h +; CHECK-NEXT: fcmeq p0.h, p0/z, z0.h, z2.h +; CHECK-NEXT: mov z3.h, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z2.b, z3.b, z3.b +; CHECK-NEXT: mov z3.b, z2.b[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z4.b, z2.b[2] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z5.b, z2.b[3] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: mov z6.b, z2.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s5 +; CHECK-NEXT: mov z7.b, z2.b[5] +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: mov z16.b, z2.b[6] +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: mov z17.b, z2.b[7] +; CHECK-NEXT: mov z2.h, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w11, s16 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s17 +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: mov z3.b, z2.b[1] +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: mov z4.b, z2.b[2] +; CHECK-NEXT: orr w9, w9, w11, lsl #6 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: orr w8, w9, w8, lsl #7 +; CHECK-NEXT: orr w8, w8, w10, lsl #8 +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: mov z5.b, z2.b[3] +; CHECK-NEXT: mov z6.b, z2.b[4] +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: mov z7.b, z2.b[5] +; CHECK-NEXT: mov z16.b, z2.b[6] +; CHECK-NEXT: mov z17.b, z2.b[7] +; CHECK-NEXT: orr w8, w8, w9, lsl #9 +; CHECK-NEXT: and w9, w10, #0x1 +; CHECK-NEXT: fmov w10, s5 +; CHECK-NEXT: orr w8, w8, w9, lsl #10 +; CHECK-NEXT: fmov w9, s6 +; CHECK-NEXT: ldp q3, q2, [x1, #96] +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #11 +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: orr w8, w8, w9, lsl #12 +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: orr w8, w8, w10, lsl #13 +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: ldp q5, q4, [x1, #64] +; CHECK-NEXT: orr w8, w8, w9, lsl #14 +; CHECK-NEXT: orr w9, w8, w10, lsl #15 +; CHECK-NEXT: and w8, w9, #0xffff +; CHECK-NEXT: ldp q7, q6, [x1, #32] +; CHECK-NEXT: ldp q17, q16, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB17_17 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB17_18 +; CHECK-NEXT: .LBB17_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB17_19 +; CHECK-NEXT: .LBB17_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB17_20 +; CHECK-NEXT: .LBB17_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB17_21 +; CHECK-NEXT: .LBB17_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB17_22 +; CHECK-NEXT: .LBB17_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB17_23 +; CHECK-NEXT: .LBB17_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB17_24 +; CHECK-NEXT: .LBB17_8: // %else14 +; CHECK-NEXT: tbnz w8, #8, .LBB17_25 +; CHECK-NEXT: .LBB17_9: // %else16 +; CHECK-NEXT: tbnz w8, #9, .LBB17_26 +; CHECK-NEXT: .LBB17_10: // %else18 +; CHECK-NEXT: tbnz w8, #10, .LBB17_27 +; CHECK-NEXT: .LBB17_11: // %else20 +; CHECK-NEXT: tbnz w8, #11, .LBB17_28 +; CHECK-NEXT: .LBB17_12: // %else22 +; CHECK-NEXT: tbnz w8, 
#12, .LBB17_29 +; CHECK-NEXT: .LBB17_13: // %else24 +; CHECK-NEXT: tbnz w8, #13, .LBB17_30 +; CHECK-NEXT: .LBB17_14: // %else26 +; CHECK-NEXT: tbnz w8, #14, .LBB17_31 +; CHECK-NEXT: .LBB17_15: // %else28 +; CHECK-NEXT: tbnz w8, #15, .LBB17_32 +; CHECK-NEXT: .LBB17_16: // %else30 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB17_17: // %cond.store +; CHECK-NEXT: fmov x9, d17 +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB17_2 +; CHECK-NEXT: .LBB17_18: // %cond.store1 +; CHECK-NEXT: mov z17.d, z17.d[1] +; CHECK-NEXT: fmov x9, d17 +; CHECK-NEXT: mov z17.h, z1.h[1] +; CHECK-NEXT: str h17, [x9] +; CHECK-NEXT: tbz w8, #2, .LBB17_3 +; CHECK-NEXT: .LBB17_19: // %cond.store3 +; CHECK-NEXT: fmov x9, d16 +; CHECK-NEXT: mov z17.h, z1.h[2] +; CHECK-NEXT: str h17, [x9] +; CHECK-NEXT: tbz w8, #3, .LBB17_4 +; CHECK-NEXT: .LBB17_20: // %cond.store5 +; CHECK-NEXT: mov z16.d, z16.d[1] +; CHECK-NEXT: fmov x9, d16 +; CHECK-NEXT: mov z16.h, z1.h[3] +; CHECK-NEXT: str h16, [x9] +; CHECK-NEXT: tbz w8, #4, .LBB17_5 +; CHECK-NEXT: .LBB17_21: // %cond.store7 +; CHECK-NEXT: fmov x9, d7 +; CHECK-NEXT: mov z16.h, z1.h[4] +; CHECK-NEXT: str h16, [x9] +; CHECK-NEXT: tbz w8, #5, .LBB17_6 +; CHECK-NEXT: .LBB17_22: // %cond.store9 +; CHECK-NEXT: mov z7.d, z7.d[1] +; CHECK-NEXT: fmov x9, d7 +; CHECK-NEXT: mov z7.h, z1.h[5] +; CHECK-NEXT: str h7, [x9] +; CHECK-NEXT: tbz w8, #6, .LBB17_7 +; CHECK-NEXT: .LBB17_23: // %cond.store11 +; CHECK-NEXT: fmov x9, d6 +; CHECK-NEXT: mov z7.h, z1.h[6] +; CHECK-NEXT: str h7, [x9] +; CHECK-NEXT: tbz w8, #7, .LBB17_8 +; CHECK-NEXT: .LBB17_24: // %cond.store13 +; CHECK-NEXT: mov z6.d, z6.d[1] +; CHECK-NEXT: mov z1.h, z1.h[7] +; CHECK-NEXT: fmov x9, d6 +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #8, .LBB17_9 +; CHECK-NEXT: .LBB17_25: // %cond.store15 +; CHECK-NEXT: fmov x9, d5 +; CHECK-NEXT: str h0, [x9] +; CHECK-NEXT: tbz w8, #9, .LBB17_10 +; CHECK-NEXT: .LBB17_26: // %cond.store17 +; CHECK-NEXT: mov z1.d, z5.d[1] +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z1.h, z0.h[1] +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #10, .LBB17_11 +; CHECK-NEXT: .LBB17_27: // %cond.store19 +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: mov z1.h, z0.h[2] +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #11, .LBB17_12 +; CHECK-NEXT: .LBB17_28: // %cond.store21 +; CHECK-NEXT: mov z1.d, z4.d[1] +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z1.h, z0.h[3] +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #12, .LBB17_13 +; CHECK-NEXT: .LBB17_29: // %cond.store23 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: mov z1.h, z0.h[4] +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #13, .LBB17_14 +; CHECK-NEXT: .LBB17_30: // %cond.store25 +; CHECK-NEXT: mov z1.d, z3.d[1] +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z1.h, z0.h[5] +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #14, .LBB17_15 +; CHECK-NEXT: .LBB17_31: // %cond.store27 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z1.h, z0.h[6] +; CHECK-NEXT: str h1, [x9] +; CHECK-NEXT: tbz w8, #15, .LBB17_16 +; CHECK-NEXT: .LBB17_32: // %cond.store29 +; CHECK-NEXT: mov z1.d, z2.d[1] +; CHECK-NEXT: mov z0.h, z0.h[7] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str h0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <16 x half>, <16 x half>* %a + %ptrs = load <16 x half*>, <16 x half*>* %b + %mask = fcmp oeq <16 x half> %vals, zeroinitializer + call void @llvm.masked.scatter.v16f16(<16 x half> %vals, <16 x half*> %ptrs, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_scatter_v2f32(<2 x float>* %a, <2 
x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI18_0 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr d1, [x8, :lo12:.LCPI18_0] +; CHECK-NEXT: fcmeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbnz w9, #0, .LBB18_3 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB18_4 +; CHECK-NEXT: .LBB18_2: // %else2 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB18_3: // %cond.store +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str s0, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB18_2 +; CHECK-NEXT: .LBB18_4: // %cond.store1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov z0.s, z0.s[1] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str s0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <2 x float>, <2 x float>* %a + %ptrs = load <2 x float*>, <2 x float*>* %b + %mask = fcmp oeq <2 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v2f32(<2 x float> %vals, <2 x float*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4f32(<4 x float>* %a, <4 x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI19_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI19_0] +; CHECK-NEXT: fcmeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z1.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: mov z2.h, z1.h[1] +; CHECK-NEXT: fmov w10, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: mov z2.h, z1.h[2] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z2.h, z1.h[3] +; CHECK-NEXT: fmov w11, s2 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: ldp q2, q1, [x1] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbnz w10, #0, .LBB19_5 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB19_6 +; CHECK-NEXT: .LBB19_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB19_7 +; CHECK-NEXT: .LBB19_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB19_8 +; CHECK-NEXT: .LBB19_4: // %else6 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB19_5: // %cond.store +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: str s0, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB19_2 +; CHECK-NEXT: .LBB19_6: // %cond.store1 +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z2.s, z0.s[1] +; CHECK-NEXT: str s2, [x9] +; CHECK-NEXT: tbz w8, #2, .LBB19_3 +; CHECK-NEXT: .LBB19_7: // %cond.store3 +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z2.s, z0.s[2] +; CHECK-NEXT: str s2, [x9] +; CHECK-NEXT: tbz w8, #3, .LBB19_4 +; CHECK-NEXT: .LBB19_8: // %cond.store5 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str s0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <4 x float>, <4 x float>* %a + %ptrs = load <4 x float*>, <4 x float*>* %b + %mask = fcmp oeq <4 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v4f32(<4 x float> %vals, <4 x float*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +define void 
@masked_scatter_v8f32(<8 x float>* %a, <8 x float*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v8f32: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI20_0 +; CHECK-NEXT: ptrue p0.s, vl4 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI20_0] +; CHECK-NEXT: fcmeq p1.s, p0/z, z0.s, z2.s +; CHECK-NEXT: fcmeq p0.s, p0/z, z1.s, z2.s +; CHECK-NEXT: mov z2.s, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z3.s, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: ptrue p1.h, vl4 +; CHECK-NEXT: uzp1 z2.h, z2.h, z2.h +; CHECK-NEXT: uzp1 z3.h, z3.h, z3.h +; CHECK-NEXT: splice z3.h, p1, z3.h, z2.h +; CHECK-NEXT: uzp1 z2.b, z3.b, z3.b +; CHECK-NEXT: mov z3.b, z2.b[1] +; CHECK-NEXT: fmov w9, s2 +; CHECK-NEXT: mov z4.b, z2.b[2] +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z5.b, z2.b[3] +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: mov z6.b, z2.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s5 +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: mov z7.b, z2.b[5] +; CHECK-NEXT: mov z3.b, z2.b[6] +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w10, s7 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z2.b, z2.b[7] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: ldp q3, q2, [x1, #32] +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: ldp q5, q4, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB20_9 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB20_10 +; CHECK-NEXT: .LBB20_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB20_11 +; CHECK-NEXT: .LBB20_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB20_12 +; CHECK-NEXT: .LBB20_4: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB20_13 +; CHECK-NEXT: .LBB20_5: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB20_14 +; CHECK-NEXT: .LBB20_6: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB20_15 +; CHECK-NEXT: .LBB20_7: // %else12 +; CHECK-NEXT: tbnz w8, #7, .LBB20_16 +; CHECK-NEXT: .LBB20_8: // %else14 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB20_9: // %cond.store +; CHECK-NEXT: fmov x9, d5 +; CHECK-NEXT: str s1, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB20_2 +; CHECK-NEXT: .LBB20_10: // %cond.store1 +; CHECK-NEXT: mov z5.d, z5.d[1] +; CHECK-NEXT: fmov x9, d5 +; CHECK-NEXT: mov z5.s, z1.s[1] +; CHECK-NEXT: str s5, [x9] +; CHECK-NEXT: tbz w8, #2, .LBB20_3 +; CHECK-NEXT: .LBB20_11: // %cond.store3 +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: mov z5.s, z1.s[2] +; CHECK-NEXT: str s5, [x9] +; CHECK-NEXT: tbz w8, #3, .LBB20_4 +; CHECK-NEXT: .LBB20_12: // %cond.store5 +; CHECK-NEXT: mov z4.d, z4.d[1] +; CHECK-NEXT: mov z1.s, z1.s[3] +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: str s1, [x9] +; CHECK-NEXT: tbz w8, #4, .LBB20_5 +; CHECK-NEXT: .LBB20_13: // %cond.store7 +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: str s0, [x9] +; CHECK-NEXT: tbz w8, #5, .LBB20_6 +; CHECK-NEXT: .LBB20_14: // %cond.store9 +; CHECK-NEXT: mov z1.d, z3.d[1] +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: mov z1.s, z0.s[1] +; CHECK-NEXT: str s1, [x9] +; CHECK-NEXT: tbz w8, #6, .LBB20_7 +; CHECK-NEXT: .LBB20_15: // %cond.store11 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: mov z1.s, z0.s[2] +; CHECK-NEXT: str s1, [x9] +; CHECK-NEXT: tbz w8, #7, .LBB20_8 +; CHECK-NEXT: .LBB20_16: // %cond.store13 +; CHECK-NEXT: mov z1.d, z2.d[1] +; CHECK-NEXT: mov z0.s, z0.s[3] +; CHECK-NEXT: fmov x8, d1 +; 
CHECK-NEXT: str s0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <8 x float>, <8 x float>* %a + %ptrs = load <8 x float*>, <8 x float*>* %b + %mask = fcmp oeq <8 x float> %vals, zeroinitializer + call void @llvm.masked.scatter.v8f32(<8 x float> %vals, <8 x float*> %ptrs, i32 8, <8 x i1> %mask) + ret void +} + +; Scalarize 1 x double scatters +define void @masked_scatter_v1f64(<1 x double>* %a, <1 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v1f64: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: fcmp d0, #0.0 +; CHECK-NEXT: b.ne .LBB21_2 +; CHECK-NEXT: // %bb.1: // %cond.store +; CHECK-NEXT: ldr d1, [x1] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str d0, [x8] +; CHECK-NEXT: .LBB21_2: // %else +; CHECK-NEXT: ret + %vals = load <1 x double>, <1 x double>* %a + %ptrs = load <1 x double*>, <1 x double*>* %b + %mask = fcmp oeq <1 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v1f64(<1 x double> %vals, <1 x double*> %ptrs, i32 8, <1 x i1> %mask) + ret void +} + +define void @masked_scatter_v2f64(<2 x double>* %a, <2 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v2f64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI22_0 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI22_0] +; CHECK-NEXT: fcmeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z1.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: mov z2.s, z1.s[1] +; CHECK-NEXT: fmov w9, s1 +; CHECK-NEXT: fmov w8, s2 +; CHECK-NEXT: ldr q1, [x1] +; CHECK-NEXT: bfi w9, w8, #1, #31 +; CHECK-NEXT: and w8, w9, #0x3 +; CHECK-NEXT: tbnz w9, #0, .LBB22_3 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB22_4 +; CHECK-NEXT: .LBB22_2: // %else2 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB22_3: // %cond.store +; CHECK-NEXT: fmov x9, d1 +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB22_2 +; CHECK-NEXT: .LBB22_4: // %cond.store1 +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str d0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <2 x double>, <2 x double>* %a + %ptrs = load <2 x double*>, <2 x double*>* %b + %mask = fcmp oeq <2 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v2f64(<2 x double> %vals, <2 x double*> %ptrs, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_scatter_v4f64(<4 x double>* %a, <4 x double*>* %b) #0 { +; CHECK-LABEL: masked_scatter_v4f64: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: adrp x8, .LCPI23_0 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q1, q0, [x0] +; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI23_0] +; CHECK-NEXT: fcmeq p1.d, p0/z, z0.d, z2.d +; CHECK-NEXT: fcmeq p0.d, p0/z, z1.d, z2.d +; CHECK-NEXT: mov z2.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z3.d, p0/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z2.s, z2.s, z2.s +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: splice z3.s, p0, z3.s, z2.s +; CHECK-NEXT: uzp1 z2.h, z3.h, z3.h +; CHECK-NEXT: mov z3.h, z2.h[1] +; CHECK-NEXT: fmov w10, s2 +; CHECK-NEXT: fmov w8, s3 +; CHECK-NEXT: mov z3.h, z2.h[2] +; CHECK-NEXT: fmov w9, s3 +; CHECK-NEXT: mov z3.h, z2.h[3] +; CHECK-NEXT: fmov w11, s3 +; CHECK-NEXT: bfi w10, w8, #1, #1 +; CHECK-NEXT: ldp q3, q2, [x1] +; CHECK-NEXT: bfi w10, w9, #2, #1 +; 
CHECK-NEXT: bfi w10, w11, #3, #29 +; CHECK-NEXT: and w8, w10, #0xf +; CHECK-NEXT: tbnz w10, #0, .LBB23_5 +; CHECK-NEXT: // %bb.1: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB23_6 +; CHECK-NEXT: .LBB23_2: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB23_7 +; CHECK-NEXT: .LBB23_3: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB23_8 +; CHECK-NEXT: .LBB23_4: // %else6 +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB23_5: // %cond.store +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: str d1, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB23_2 +; CHECK-NEXT: .LBB23_6: // %cond.store1 +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: str d1, [x9] +; CHECK-NEXT: tbz w8, #2, .LBB23_3 +; CHECK-NEXT: .LBB23_7: // %cond.store3 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: tbz w8, #3, .LBB23_4 +; CHECK-NEXT: .LBB23_8: // %cond.store5 +; CHECK-NEXT: mov z1.d, z2.d[1] +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str d0, [x8] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %vals = load <4 x double>, <4 x double>* %a + %ptrs = load <4 x double*>, <4 x double*>* %b + %mask = fcmp oeq <4 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v4f64(<4 x double> %vals, <4 x double*> %ptrs, i32 8, <4 x i1> %mask) + ret void +} + +; The above tests test the types, the below tests check that the addressing +; modes still function + +define void @masked_scatter_32b_scaled_sext_f64(<32 x double>* %a, <32 x i32>* %b, double* %base) #0 { +; CHECK-LABEL: masked_scatter_32b_scaled_sext_f64: +; CHECK: // %bb.0: +; CHECK-NEXT: stp d15, d14, [sp, #-96]! // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 96 +; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: stp x29, x21, [sp, #64] // 16-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w20, -16 +; CHECK-NEXT: .cfi_offset w21, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: .cfi_offset b8, -40 +; CHECK-NEXT: .cfi_offset b9, -48 +; CHECK-NEXT: .cfi_offset b10, -56 +; CHECK-NEXT: .cfi_offset b11, -64 +; CHECK-NEXT: .cfi_offset b12, -72 +; CHECK-NEXT: .cfi_offset b13, -80 +; CHECK-NEXT: .cfi_offset b14, -88 +; CHECK-NEXT: .cfi_offset b15, -96 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 96 + 8 * VG +; CHECK-NEXT: sub sp, sp, #32 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x80, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 128 + 8 * VG +; CHECK-NEXT: adrp x8, .LCPI24_1 +; CHECK-NEXT: ptrue p0.d, vl2 +; CHECK-NEXT: ldp q4, q2, [x0, #160] +; CHECK-NEXT: ldr q23, [x8, :lo12:.LCPI24_1] +; CHECK-NEXT: ldp q17, q7, [x0, #128] +; CHECK-NEXT: fcmeq p1.d, p0/z, z2.d, z23.d +; CHECK-NEXT: mov z0.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p1.d, p0/z, z4.d, z23.d +; CHECK-NEXT: mov z1.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z1.s, z1.s, z1.s +; CHECK-NEXT: ptrue p1.s, vl2 +; CHECK-NEXT: fcmeq p2.d, p0/z, z7.d, z23.d +; CHECK-NEXT: fcmeq p3.d, p0/z, z17.d, z23.d +; CHECK-NEXT: splice z1.s, p1, z1.s, z0.s +; CHECK-NEXT: mov z0.d, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z3.d, p3/z, #-1 // 
=0xffffffffffffffff +; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s +; CHECK-NEXT: uzp1 z3.s, z3.s, z3.s +; CHECK-NEXT: uzp1 z1.h, z1.h, z1.h +; CHECK-NEXT: splice z3.s, p1, z3.s, z0.s +; CHECK-NEXT: ptrue p2.h, vl4 +; CHECK-NEXT: uzp1 z0.h, z3.h, z3.h +; CHECK-NEXT: splice z0.h, p2, z0.h, z1.h +; CHECK-NEXT: uzp1 z6.b, z0.b, z0.b +; CHECK-NEXT: ldp q1, q0, [x0, #224] +; CHECK-NEXT: fmov w8, s6 +; CHECK-NEXT: mov z16.b, z6.b[1] +; CHECK-NEXT: fmov w9, s16 +; CHECK-NEXT: mov z20.b, z6.b[2] +; CHECK-NEXT: fmov w10, s20 +; CHECK-NEXT: mov z24.b, z6.b[3] +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: mov z8.b, z6.b[5] +; CHECK-NEXT: lsl w8, w8, #16 +; CHECK-NEXT: mov z25.b, z6.b[4] +; CHECK-NEXT: ldp q5, q3, [x0, #192] +; CHECK-NEXT: bfi w8, w9, #17, #1 +; CHECK-NEXT: fmov w9, s24 +; CHECK-NEXT: bfi w8, w10, #18, #1 +; CHECK-NEXT: add x10, sp, #32 +; CHECK-NEXT: fcmeq p3.d, p0/z, z0.d, z23.d +; CHECK-NEXT: mov z9.b, z6.b[6] +; CHECK-NEXT: mov z24.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z1.d, z23.d +; CHECK-NEXT: bfi w8, w9, #19, #1 +; CHECK-NEXT: fmov w9, s8 +; CHECK-NEXT: mov z8.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z3.d, z23.d +; CHECK-NEXT: ldp q21, q18, [x0, #64] +; CHECK-NEXT: mov z10.b, z6.b[7] +; CHECK-NEXT: fmov w11, s25 +; CHECK-NEXT: uzp1 z24.s, z24.s, z24.s +; CHECK-NEXT: uzp1 z8.s, z8.s, z8.s +; CHECK-NEXT: splice z8.s, p1, z8.s, z24.s +; CHECK-NEXT: uzp1 z24.h, z8.h, z8.h +; CHECK-NEXT: bfi w8, w11, #20, #1 +; CHECK-NEXT: fmov w11, s10 +; CHECK-NEXT: bfi w8, w9, #21, #1 +; CHECK-NEXT: ldp q22, q19, [x0, #96] +; CHECK-NEXT: and w9, w11, #0x1 +; CHECK-NEXT: ldp q30, q29, [x0] +; CHECK-NEXT: ldp q28, q26, [x0, #32] +; CHECK-NEXT: ldp q16, q6, [x1, #96] +; CHECK-NEXT: ldp q25, q20, [x1, #64] +; CHECK-NEXT: ldp q31, q27, [x1, #32] +; CHECK-NEXT: str z0, [x10] // 16-byte Folded Spill +; CHECK-NEXT: fmov w10, s9 +; CHECK-NEXT: mov z9.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z5.d, z23.d +; CHECK-NEXT: mov z11.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z9.s, z9.s, z9.s +; CHECK-NEXT: uzp1 z11.s, z11.s, z11.s +; CHECK-NEXT: fcmeq p3.d, p0/z, z26.d, z23.d +; CHECK-NEXT: splice z11.s, p1, z11.s, z9.s +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: uzp1 z8.h, z11.h, z11.h +; CHECK-NEXT: splice z8.h, p2, z8.h, z24.h +; CHECK-NEXT: orr w8, w8, w10, lsl #22 +; CHECK-NEXT: uzp1 z9.b, z8.b, z8.b +; CHECK-NEXT: orr w8, w8, w9, lsl #23 +; CHECK-NEXT: mov z10.b, z9.b[1] +; CHECK-NEXT: mov z11.b, z9.b[2] +; CHECK-NEXT: mov z12.b, z9.b[3] +; CHECK-NEXT: mov z13.b, z9.b[4] +; CHECK-NEXT: mov z14.b, z9.b[5] +; CHECK-NEXT: mov z8.b, z9.b[6] +; CHECK-NEXT: mov z24.b, z9.b[7] +; CHECK-NEXT: fmov w14, s9 +; CHECK-NEXT: mov z9.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z28.d, z23.d +; CHECK-NEXT: fmov w12, s10 +; CHECK-NEXT: mov z10.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z29.d, z23.d +; CHECK-NEXT: fmov w13, s11 +; CHECK-NEXT: mov z11.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z30.d, z23.d +; CHECK-NEXT: mov z15.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z9.s, z9.s, z9.s +; CHECK-NEXT: uzp1 z10.s, z10.s, z10.s +; CHECK-NEXT: uzp1 z11.s, z11.s, z11.s +; CHECK-NEXT: uzp1 z15.s, z15.s, z15.s +; CHECK-NEXT: splice z10.s, p1, z10.s, z9.s +; CHECK-NEXT: splice z15.s, p1, z15.s, z11.s +; CHECK-NEXT: uzp1 z9.h, z10.h, z10.h +; CHECK-NEXT: uzp1 z10.h, z15.h, z15.h +; CHECK-NEXT: fcmeq p3.d, p0/z, z19.d, z23.d +; CHECK-NEXT: splice 
z10.h, p2, z10.h, z9.h +; CHECK-NEXT: fmov w17, s12 +; CHECK-NEXT: uzp1 z9.b, z10.b, z10.b +; CHECK-NEXT: fmov w16, s13 +; CHECK-NEXT: mov z10.b, z9.b[1] +; CHECK-NEXT: fmov w15, s14 +; CHECK-NEXT: fmov w18, s10 +; CHECK-NEXT: mov z10.b, z9.b[2] +; CHECK-NEXT: mov z11.b, z9.b[3] +; CHECK-NEXT: mov z12.b, z9.b[4] +; CHECK-NEXT: mov z13.b, z9.b[5] +; CHECK-NEXT: mov z14.b, z9.b[6] +; CHECK-NEXT: mov z15.b, z9.b[7] +; CHECK-NEXT: fmov w6, s9 +; CHECK-NEXT: mov z9.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z22.d, z23.d +; CHECK-NEXT: fmov w4, s10 +; CHECK-NEXT: mov z10.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z18.d, z23.d +; CHECK-NEXT: fmov w0, s11 +; CHECK-NEXT: mov z11.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p3.d, p0/z, z21.d, z23.d +; CHECK-NEXT: uzp1 z23.s, z10.s, z10.s +; CHECK-NEXT: mov z10.d, p3/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z9.s, z9.s, z9.s +; CHECK-NEXT: uzp1 z11.s, z11.s, z11.s +; CHECK-NEXT: uzp1 z10.s, z10.s, z10.s +; CHECK-NEXT: and w10, w14, #0x1 +; CHECK-NEXT: splice z23.s, p1, z23.s, z9.s +; CHECK-NEXT: splice z10.s, p1, z10.s, z11.s +; CHECK-NEXT: uzp1 z23.h, z23.h, z23.h +; CHECK-NEXT: uzp1 z9.h, z10.h, z10.h +; CHECK-NEXT: orr w8, w8, w10, lsl #24 +; CHECK-NEXT: adrp x10, .LCPI24_0 +; CHECK-NEXT: splice z9.h, p2, z9.h, z23.h +; CHECK-NEXT: and w9, w12, #0x1 +; CHECK-NEXT: uzp1 z23.b, z9.b, z9.b +; CHECK-NEXT: and w11, w13, #0x1 +; CHECK-NEXT: fmov w3, s12 +; CHECK-NEXT: fmov w5, s13 +; CHECK-NEXT: fmov w7, s14 +; CHECK-NEXT: fmov w19, s15 +; CHECK-NEXT: mov z9.b, z23.b[1] +; CHECK-NEXT: mov z10.b, z23.b[2] +; CHECK-NEXT: mov z11.b, z23.b[3] +; CHECK-NEXT: mov z12.b, z23.b[4] +; CHECK-NEXT: mov z13.b, z23.b[5] +; CHECK-NEXT: mov z14.b, z23.b[6] +; CHECK-NEXT: mov z15.b, z23.b[7] +; CHECK-NEXT: fmov w20, s23 +; CHECK-NEXT: orr w8, w8, w9, lsl #25 +; CHECK-NEXT: and w9, w17, #0x1 +; CHECK-NEXT: ldr q23, [x10, :lo12:.LCPI24_0] +; CHECK-NEXT: and w10, w6, #0x1 +; CHECK-NEXT: orr w8, w8, w11, lsl #26 +; CHECK-NEXT: bfi w10, w18, #1, #1 +; CHECK-NEXT: orr w8, w8, w9, lsl #27 +; CHECK-NEXT: and w9, w16, #0x1 +; CHECK-NEXT: bfi w10, w4, #2, #1 +; CHECK-NEXT: and w12, w15, #0x1 +; CHECK-NEXT: bfi w10, w0, #3, #1 +; CHECK-NEXT: fmov w21, s9 +; CHECK-NEXT: orr w8, w8, w9, lsl #28 +; CHECK-NEXT: bfi w10, w3, #4, #1 +; CHECK-NEXT: and w9, w7, #0x1 +; CHECK-NEXT: orr w8, w8, w12, lsl #29 +; CHECK-NEXT: bfi w10, w5, #5, #1 +; CHECK-NEXT: and w12, w19, #0x1 +; CHECK-NEXT: orr w9, w10, w9, lsl #6 +; CHECK-NEXT: and w10, w20, #0x1 +; CHECK-NEXT: fmov w11, s10 +; CHECK-NEXT: orr w9, w9, w12, lsl #7 +; CHECK-NEXT: and w12, w21, #0x1 +; CHECK-NEXT: orr w9, w9, w10, lsl #8 +; CHECK-NEXT: fmov w10, s11 +; CHECK-NEXT: orr w9, w9, w12, lsl #9 +; CHECK-NEXT: fmov w12, s12 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: orr w9, w9, w11, lsl #10 +; CHECK-NEXT: and w11, w12, #0x1 +; CHECK-NEXT: orr w9, w9, w10, lsl #11 +; CHECK-NEXT: fmov w10, s13 +; CHECK-NEXT: orr w9, w9, w11, lsl #12 +; CHECK-NEXT: fmov w11, s14 +; CHECK-NEXT: fmov w12, s8 +; CHECK-NEXT: and w10, w10, #0x1 +; CHECK-NEXT: and w11, w11, #0x1 +; CHECK-NEXT: orr w9, w9, w10, lsl #13 +; CHECK-NEXT: fmov w10, s15 +; CHECK-NEXT: and w12, w12, #0x1 +; CHECK-NEXT: orr w9, w9, w11, lsl #14 +; CHECK-NEXT: fmov w11, s24 +; CHECK-NEXT: ldp q0, q9, [x1] +; CHECK-NEXT: stp x2, x2, [sp] +; CHECK-NEXT: orr w8, w8, w12, lsl #30 +; CHECK-NEXT: orr w9, w9, w10, lsl #15 +; CHECK-NEXT: orr w8, w8, w11, lsl #31 +; CHECK-NEXT: and 
w9, w9, #0xffff +; CHECK-NEXT: orr w8, w9, w8 +; CHECK-NEXT: sunpklo z8.d, z0.s +; CHECK-NEXT: ext z0.b, z0.b, z0.b, #8 +; CHECK-NEXT: ldr q24, [sp] +; CHECK-NEXT: movprfx z10, z8 +; CHECK-NEXT: lsl z10.d, p0/m, z10.d, z23.d +; CHECK-NEXT: sunpklo z0.d, z0.s +; CHECK-NEXT: movprfx z8, z0 +; CHECK-NEXT: lsl z8.d, p0/m, z8.d, z23.d +; CHECK-NEXT: add z10.d, z24.d, z10.d +; CHECK-NEXT: tbz w8, #0, .LBB24_2 +; CHECK-NEXT: // %bb.1: // %cond.store +; CHECK-NEXT: fmov x9, d10 +; CHECK-NEXT: str d30, [x9] +; CHECK-NEXT: .LBB24_2: // %else +; CHECK-NEXT: sunpklo z11.d, z9.s +; CHECK-NEXT: add z8.d, z24.d, z8.d +; CHECK-NEXT: tbz w8, #1, .LBB24_4 +; CHECK-NEXT: // %bb.3: // %cond.store1 +; CHECK-NEXT: mov z0.d, z10.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z30.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_4: // %else2 +; CHECK-NEXT: ext z9.b, z9.b, z9.b, #8 +; CHECK-NEXT: movprfx z30, z11 +; CHECK-NEXT: lsl z30.d, p0/m, z30.d, z23.d +; CHECK-NEXT: tbz w8, #2, .LBB24_6 +; CHECK-NEXT: // %bb.5: // %cond.store3 +; CHECK-NEXT: fmov x9, d8 +; CHECK-NEXT: str d29, [x9] +; CHECK-NEXT: .LBB24_6: // %else4 +; CHECK-NEXT: sunpklo z9.d, z9.s +; CHECK-NEXT: add z30.d, z24.d, z30.d +; CHECK-NEXT: tbz w8, #3, .LBB24_8 +; CHECK-NEXT: // %bb.7: // %cond.store5 +; CHECK-NEXT: mov z0.d, z8.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z29.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_8: // %else6 +; CHECK-NEXT: movprfx z29, z9 +; CHECK-NEXT: lsl z29.d, p0/m, z29.d, z23.d +; CHECK-NEXT: tbz w8, #4, .LBB24_10 +; CHECK-NEXT: // %bb.9: // %cond.store7 +; CHECK-NEXT: fmov x9, d30 +; CHECK-NEXT: str d28, [x9] +; CHECK-NEXT: .LBB24_10: // %else8 +; CHECK-NEXT: sunpklo z8.d, z31.s +; CHECK-NEXT: add z29.d, z24.d, z29.d +; CHECK-NEXT: tbz w8, #5, .LBB24_12 +; CHECK-NEXT: // %bb.11: // %cond.store9 +; CHECK-NEXT: mov z0.d, z30.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z28.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_12: // %else10 +; CHECK-NEXT: ext z31.b, z31.b, z31.b, #8 +; CHECK-NEXT: movprfx z28, z8 +; CHECK-NEXT: lsl z28.d, p0/m, z28.d, z23.d +; CHECK-NEXT: tbz w8, #6, .LBB24_14 +; CHECK-NEXT: // %bb.13: // %cond.store11 +; CHECK-NEXT: fmov x9, d29 +; CHECK-NEXT: str d26, [x9] +; CHECK-NEXT: .LBB24_14: // %else12 +; CHECK-NEXT: sunpklo z30.d, z31.s +; CHECK-NEXT: add z28.d, z24.d, z28.d +; CHECK-NEXT: tbz w8, #7, .LBB24_16 +; CHECK-NEXT: // %bb.15: // %cond.store13 +; CHECK-NEXT: mov z0.d, z29.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z26.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_16: // %else14 +; CHECK-NEXT: movprfx z26, z30 +; CHECK-NEXT: lsl z26.d, p0/m, z26.d, z23.d +; CHECK-NEXT: tbz w8, #8, .LBB24_18 +; CHECK-NEXT: // %bb.17: // %cond.store15 +; CHECK-NEXT: fmov x9, d28 +; CHECK-NEXT: str d21, [x9] +; CHECK-NEXT: .LBB24_18: // %else16 +; CHECK-NEXT: sunpklo z29.d, z27.s +; CHECK-NEXT: add z26.d, z24.d, z26.d +; CHECK-NEXT: tbz w8, #9, .LBB24_20 +; CHECK-NEXT: // %bb.19: // %cond.store17 +; CHECK-NEXT: mov z0.d, z28.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z21.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_20: // %else18 +; CHECK-NEXT: ext z27.b, z27.b, z27.b, #8 +; CHECK-NEXT: movprfx z21, z29 +; CHECK-NEXT: lsl z21.d, p0/m, z21.d, z23.d +; CHECK-NEXT: tbz w8, #10, .LBB24_22 +; CHECK-NEXT: // %bb.21: // %cond.store19 +; CHECK-NEXT: fmov x9, d26 +; CHECK-NEXT: str d18, [x9] +; CHECK-NEXT: .LBB24_22: // %else20 +; CHECK-NEXT: sunpklo z27.d, z27.s +; CHECK-NEXT: add z21.d, z24.d, z21.d +; 
CHECK-NEXT: tbz w8, #11, .LBB24_24 +; CHECK-NEXT: // %bb.23: // %cond.store21 +; CHECK-NEXT: mov z0.d, z26.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z18.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_24: // %else22 +; CHECK-NEXT: movprfx z18, z27 +; CHECK-NEXT: lsl z18.d, p0/m, z18.d, z23.d +; CHECK-NEXT: tbz w8, #12, .LBB24_26 +; CHECK-NEXT: // %bb.25: // %cond.store23 +; CHECK-NEXT: fmov x9, d21 +; CHECK-NEXT: str d22, [x9] +; CHECK-NEXT: .LBB24_26: // %else24 +; CHECK-NEXT: sunpklo z26.d, z25.s +; CHECK-NEXT: add z18.d, z24.d, z18.d +; CHECK-NEXT: tbz w8, #13, .LBB24_28 +; CHECK-NEXT: // %bb.27: // %cond.store25 +; CHECK-NEXT: mov z0.d, z21.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z22.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_28: // %else26 +; CHECK-NEXT: ext z25.b, z25.b, z25.b, #8 +; CHECK-NEXT: movprfx z21, z26 +; CHECK-NEXT: lsl z21.d, p0/m, z21.d, z23.d +; CHECK-NEXT: tbz w8, #14, .LBB24_30 +; CHECK-NEXT: // %bb.29: // %cond.store27 +; CHECK-NEXT: fmov x9, d18 +; CHECK-NEXT: str d19, [x9] +; CHECK-NEXT: .LBB24_30: // %else28 +; CHECK-NEXT: sunpklo z22.d, z25.s +; CHECK-NEXT: add z21.d, z24.d, z21.d +; CHECK-NEXT: tbz w8, #15, .LBB24_32 +; CHECK-NEXT: // %bb.31: // %cond.store29 +; CHECK-NEXT: mov z0.d, z18.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z19.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_32: // %else30 +; CHECK-NEXT: movprfx z18, z22 +; CHECK-NEXT: lsl z18.d, p0/m, z18.d, z23.d +; CHECK-NEXT: tbz w8, #16, .LBB24_34 +; CHECK-NEXT: // %bb.33: // %cond.store31 +; CHECK-NEXT: fmov x9, d21 +; CHECK-NEXT: str d17, [x9] +; CHECK-NEXT: .LBB24_34: // %else32 +; CHECK-NEXT: sunpklo z19.d, z20.s +; CHECK-NEXT: add z18.d, z24.d, z18.d +; CHECK-NEXT: tbz w8, #17, .LBB24_36 +; CHECK-NEXT: // %bb.35: // %cond.store33 +; CHECK-NEXT: mov z0.d, z21.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z17.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_36: // %else34 +; CHECK-NEXT: ext z20.b, z20.b, z20.b, #8 +; CHECK-NEXT: movprfx z17, z19 +; CHECK-NEXT: lsl z17.d, p0/m, z17.d, z23.d +; CHECK-NEXT: tbz w8, #18, .LBB24_38 +; CHECK-NEXT: // %bb.37: // %cond.store35 +; CHECK-NEXT: fmov x9, d18 +; CHECK-NEXT: str d7, [x9] +; CHECK-NEXT: .LBB24_38: // %else36 +; CHECK-NEXT: sunpklo z19.d, z20.s +; CHECK-NEXT: add z17.d, z24.d, z17.d +; CHECK-NEXT: tbz w8, #19, .LBB24_40 +; CHECK-NEXT: // %bb.39: // %cond.store37 +; CHECK-NEXT: mov z0.d, z18.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z7.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_40: // %else38 +; CHECK-NEXT: movprfx z7, z19 +; CHECK-NEXT: lsl z7.d, p0/m, z7.d, z23.d +; CHECK-NEXT: tbz w8, #20, .LBB24_42 +; CHECK-NEXT: // %bb.41: // %cond.store39 +; CHECK-NEXT: fmov x9, d17 +; CHECK-NEXT: str d4, [x9] +; CHECK-NEXT: .LBB24_42: // %else40 +; CHECK-NEXT: sunpklo z18.d, z16.s +; CHECK-NEXT: add z7.d, z24.d, z7.d +; CHECK-NEXT: tbz w8, #21, .LBB24_44 +; CHECK-NEXT: // %bb.43: // %cond.store41 +; CHECK-NEXT: mov z0.d, z17.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z4.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_44: // %else42 +; CHECK-NEXT: ext z16.b, z16.b, z16.b, #8 +; CHECK-NEXT: movprfx z4, z18 +; CHECK-NEXT: lsl z4.d, p0/m, z4.d, z23.d +; CHECK-NEXT: tbz w8, #22, .LBB24_46 +; CHECK-NEXT: // %bb.45: // %cond.store43 +; CHECK-NEXT: fmov x9, d7 +; CHECK-NEXT: str d2, [x9] +; CHECK-NEXT: .LBB24_46: // %else44 +; CHECK-NEXT: sunpklo z16.d, z16.s +; CHECK-NEXT: add z4.d, z24.d, z4.d +; CHECK-NEXT: tbz w8, #23, 
.LBB24_48 +; CHECK-NEXT: // %bb.47: // %cond.store45 +; CHECK-NEXT: mov z0.d, z7.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z2.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_48: // %else46 +; CHECK-NEXT: movprfx z2, z16 +; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z23.d +; CHECK-NEXT: tbz w8, #24, .LBB24_50 +; CHECK-NEXT: // %bb.49: // %cond.store47 +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: str d5, [x9] +; CHECK-NEXT: .LBB24_50: // %else48 +; CHECK-NEXT: sunpklo z7.d, z6.s +; CHECK-NEXT: add z2.d, z24.d, z2.d +; CHECK-NEXT: tbz w8, #25, .LBB24_52 +; CHECK-NEXT: // %bb.51: // %cond.store49 +; CHECK-NEXT: mov z0.d, z4.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z5.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_52: // %else50 +; CHECK-NEXT: ext z6.b, z6.b, z6.b, #8 +; CHECK-NEXT: movprfx z4, z7 +; CHECK-NEXT: lsl z4.d, p0/m, z4.d, z23.d +; CHECK-NEXT: tbz w8, #26, .LBB24_54 +; CHECK-NEXT: // %bb.53: // %cond.store51 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: str d3, [x9] +; CHECK-NEXT: .LBB24_54: // %else52 +; CHECK-NEXT: sunpklo z5.d, z6.s +; CHECK-NEXT: add z4.d, z24.d, z4.d +; CHECK-NEXT: tbz w8, #27, .LBB24_56 +; CHECK-NEXT: // %bb.55: // %cond.store53 +; CHECK-NEXT: mov z0.d, z2.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z3.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: .LBB24_56: // %else54 +; CHECK-NEXT: movprfx z2, z5 +; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z23.d +; CHECK-NEXT: tbnz w8, #28, .LBB24_62 +; CHECK-NEXT: // %bb.57: // %else56 +; CHECK-NEXT: add z2.d, z24.d, z2.d +; CHECK-NEXT: tbnz w8, #29, .LBB24_63 +; CHECK-NEXT: .LBB24_58: // %else58 +; CHECK-NEXT: tbnz w8, #30, .LBB24_64 +; CHECK-NEXT: .LBB24_59: // %else60 +; CHECK-NEXT: tbz w8, #31, .LBB24_61 +; CHECK-NEXT: .LBB24_60: // %cond.store61 +; CHECK-NEXT: add x9, sp, #32 +; CHECK-NEXT: mov z0.d, z2.d[1] +; CHECK-NEXT: fmov x8, d0 +; CHECK-NEXT: ldr z0, [x9] // 16-byte Folded Reload +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: str d0, [x8] +; CHECK-NEXT: .LBB24_61: // %else62 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #32 +; CHECK-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-NEXT: ldp x29, x21, [sp, #64] // 16-byte Folded Reload +; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload +; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp d15, d14, [sp], #96 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB24_62: // %cond.store55 +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: str d1, [x9] +; CHECK-NEXT: add z2.d, z24.d, z2.d +; CHECK-NEXT: tbz w8, #29, .LBB24_58 +; CHECK-NEXT: .LBB24_63: // %cond.store57 +; CHECK-NEXT: mov z0.d, z4.d[1] +; CHECK-NEXT: fmov x9, d0 +; CHECK-NEXT: mov z0.d, z1.d[1] +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: tbz w8, #30, .LBB24_59 +; CHECK-NEXT: .LBB24_64: // %cond.store59 +; CHECK-NEXT: add x10, sp, #32 +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: ldr z0, [x10] // 16-byte Folded Reload +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: tbnz w8, #31, .LBB24_60 +; CHECK-NEXT: b .LBB24_61 + %vals = load <32 x double>, <32 x double>* %a + %idxs = load <32 x i32>, <32 x i32>* %b + %ext = sext <32 x i32> %idxs to <32 x i64> + %ptrs = getelementptr double, double* %base, <32 x i64> %ext + %mask = fcmp oeq <32 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v32f64(<32 x double> %vals, <32 x double*> %ptrs, i32 8, <32 x i1> %mask) + ret void +} + +; extract_subvec(...(insert_subvec(a,b,c))) -> 
extract_subvec(bitcast(b),d) like +; combines can effectively unlegalise bitcast operations. This test ensures such +; combines do not happen after operation legalisation. When not prevented the +; test triggers infinite combine->legalise->combine->... +; +; NOTE: For this test to function correctly it's critical for %vals to be in a +; different block to the scatter store. If not, the problematic bitcast will be +; removed before operation legalisation and thus not exercise the combine. +define void @masked_scatter_bitcast_infinite_loop(<8 x double>* %a, <8 x double*>* %b, i1 %cond) #0 { +; CHECK-LABEL: masked_scatter_bitcast_infinite_loop: +; CHECK: // %bb.0: +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldr q0, [x0, #48] +; CHECK-NEXT: ldr q1, [x0, #32] +; CHECK-NEXT: ldr q2, [x0, #16] +; CHECK-NEXT: ldr q3, [x0] +; CHECK-NEXT: tbz w2, #0, .LBB25_10 +; CHECK-NEXT: // %bb.1: // %bb.1 +; CHECK-NEXT: adrp x8, .LCPI25_0 +; CHECK-NEXT: ptrue p1.d, vl2 +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI25_0] +; CHECK-NEXT: fcmeq p2.d, p1/z, z2.d, z4.d +; CHECK-NEXT: mov z5.d, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p2.d, p1/z, z3.d, z4.d +; CHECK-NEXT: mov z6.d, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: fcmeq p2.d, p1/z, z1.d, z4.d +; CHECK-NEXT: fcmeq p1.d, p1/z, z0.d, z4.d +; CHECK-NEXT: mov z4.d, p2/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: mov z7.d, p1/z, #-1 // =0xffffffffffffffff +; CHECK-NEXT: uzp1 z5.s, z5.s, z5.s +; CHECK-NEXT: uzp1 z6.s, z6.s, z6.s +; CHECK-NEXT: uzp1 z4.s, z4.s, z4.s +; CHECK-NEXT: uzp1 z7.s, z7.s, z7.s +; CHECK-NEXT: splice z6.s, p0, z6.s, z5.s +; CHECK-NEXT: splice z4.s, p0, z4.s, z7.s +; CHECK-NEXT: ptrue p2.h, vl4 +; CHECK-NEXT: uzp1 z5.h, z6.h, z6.h +; CHECK-NEXT: uzp1 z4.h, z4.h, z4.h +; CHECK-NEXT: splice z5.h, p2, z5.h, z4.h +; CHECK-NEXT: uzp1 z4.b, z5.b, z5.b +; CHECK-NEXT: mov z5.b, z4.b[1] +; CHECK-NEXT: fmov w9, s4 +; CHECK-NEXT: mov z6.b, z4.b[2] +; CHECK-NEXT: fmov w8, s5 +; CHECK-NEXT: mov z7.b, z4.b[3] +; CHECK-NEXT: fmov w10, s6 +; CHECK-NEXT: mov z16.b, z4.b[4] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: fmov w11, s7 +; CHECK-NEXT: bfi w9, w8, #1, #1 +; CHECK-NEXT: fmov w8, s16 +; CHECK-NEXT: bfi w9, w10, #2, #1 +; CHECK-NEXT: mov z17.b, z4.b[5] +; CHECK-NEXT: mov z18.b, z4.b[6] +; CHECK-NEXT: bfi w9, w11, #3, #1 +; CHECK-NEXT: fmov w10, s17 +; CHECK-NEXT: bfi w9, w8, #4, #1 +; CHECK-NEXT: fmov w8, s18 +; CHECK-NEXT: mov z4.b, z4.b[7] +; CHECK-NEXT: bfi w9, w10, #5, #1 +; CHECK-NEXT: fmov w10, s4 +; CHECK-NEXT: and w8, w8, #0x1 +; CHECK-NEXT: ldp q5, q4, [x1, #32] +; CHECK-NEXT: orr w8, w9, w8, lsl #6 +; CHECK-NEXT: orr w9, w8, w10, lsl #7 +; CHECK-NEXT: and w8, w9, #0xff +; CHECK-NEXT: ldp q7, q6, [x1] +; CHECK-NEXT: tbnz w9, #0, .LBB25_11 +; CHECK-NEXT: // %bb.2: // %else +; CHECK-NEXT: tbnz w8, #1, .LBB25_12 +; CHECK-NEXT: .LBB25_3: // %else2 +; CHECK-NEXT: tbnz w8, #2, .LBB25_13 +; CHECK-NEXT: .LBB25_4: // %else4 +; CHECK-NEXT: tbnz w8, #3, .LBB25_14 +; CHECK-NEXT: .LBB25_5: // %else6 +; CHECK-NEXT: tbnz w8, #4, .LBB25_15 +; CHECK-NEXT: .LBB25_6: // %else8 +; CHECK-NEXT: tbnz w8, #5, .LBB25_16 +; CHECK-NEXT: .LBB25_7: // %else10 +; CHECK-NEXT: tbnz w8, #6, .LBB25_17 +; CHECK-NEXT: .LBB25_8: // %else12 +; CHECK-NEXT: tbz w8, #7, .LBB25_10 +; CHECK-NEXT: .LBB25_9: // %cond.store13 +; CHECK-NEXT: mov z1.d, z4.d[1] +; CHECK-NEXT: mov z0.d, z0.d[1] +; CHECK-NEXT: fmov x8, d1 +; CHECK-NEXT: str d0, [x8] +; CHECK-NEXT: .LBB25_10: // %bb.2 +; CHECK-NEXT: add sp, sp, 
#16 +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB25_11: // %cond.store +; CHECK-NEXT: fmov x9, d7 +; CHECK-NEXT: str d3, [x9] +; CHECK-NEXT: tbz w8, #1, .LBB25_3 +; CHECK-NEXT: .LBB25_12: // %cond.store1 +; CHECK-NEXT: mov z7.d, z7.d[1] +; CHECK-NEXT: mov z3.d, z3.d[1] +; CHECK-NEXT: fmov x9, d7 +; CHECK-NEXT: str d3, [x9] +; CHECK-NEXT: tbz w8, #2, .LBB25_4 +; CHECK-NEXT: .LBB25_13: // %cond.store3 +; CHECK-NEXT: fmov x9, d6 +; CHECK-NEXT: str d2, [x9] +; CHECK-NEXT: tbz w8, #3, .LBB25_5 +; CHECK-NEXT: .LBB25_14: // %cond.store5 +; CHECK-NEXT: mov z3.d, z6.d[1] +; CHECK-NEXT: mov z2.d, z2.d[1] +; CHECK-NEXT: fmov x9, d3 +; CHECK-NEXT: str d2, [x9] +; CHECK-NEXT: tbz w8, #4, .LBB25_6 +; CHECK-NEXT: .LBB25_15: // %cond.store7 +; CHECK-NEXT: fmov x9, d5 +; CHECK-NEXT: str d1, [x9] +; CHECK-NEXT: tbz w8, #5, .LBB25_7 +; CHECK-NEXT: .LBB25_16: // %cond.store9 +; CHECK-NEXT: mov z2.d, z5.d[1] +; CHECK-NEXT: mov z1.d, z1.d[1] +; CHECK-NEXT: fmov x9, d2 +; CHECK-NEXT: str d1, [x9] +; CHECK-NEXT: tbz w8, #6, .LBB25_8 +; CHECK-NEXT: .LBB25_17: // %cond.store11 +; CHECK-NEXT: fmov x9, d4 +; CHECK-NEXT: str d0, [x9] +; CHECK-NEXT: tbnz w8, #7, .LBB25_9 +; CHECK-NEXT: b .LBB25_10 + %vals = load volatile <8 x double>, <8 x double>* %a + br i1 %cond, label %bb.1, label %bb.2 + +bb.1: + %ptrs = load <8 x double*>, <8 x double*>* %b + %mask = fcmp oeq <8 x double> %vals, zeroinitializer + call void @llvm.masked.scatter.v8f64(<8 x double> %vals, <8 x double*> %ptrs, i32 8, <8 x i1> %mask) + br label %bb.2 + +bb.2: + ret void +} + +declare void @llvm.masked.scatter.v4i8(<4 x i8>, <4 x i8*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8i8(<8 x i8>, <8 x i8*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16i8(<16 x i8>, <16 x i8*>, i32, <16 x i1>) +declare void @llvm.masked.scatter.v32i8(<32 x i8>, <32 x i8*>, i32, <32 x i1>) + +declare void @llvm.masked.scatter.v2i16(<2 x i16>, <2 x i16*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4i16(<4 x i16>, <4 x i16*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8i16(<8 x i16>, <8 x i16*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16i16(<16 x i16>, <16 x i16*>, i32, <16 x i1>) + +declare void @llvm.masked.scatter.v2i32(<2 x i32>, <2 x i32*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8i32(<8 x i32>, <8 x i32*>, i32, <8 x i1>) + +declare void @llvm.masked.scatter.v1i64(<1 x i64>, <1 x i64*>, i32, <1 x i1>) +declare void @llvm.masked.scatter.v2i64(<2 x i64>, <2 x i64*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4i64(<4 x i64>, <4 x i64*>, i32, <4 x i1>) + +declare void @llvm.masked.scatter.v2f16(<2 x half>, <2 x half*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4f16(<4 x half>, <4 x half*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8f16(<8 x half>, <8 x half*>, i32, <8 x i1>) +declare void @llvm.masked.scatter.v16f16(<16 x half>, <16 x half*>, i32, <16 x i1>) + +declare void @llvm.masked.scatter.v2f32(<2 x float>, <2 x float*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4f32(<4 x float>, <4 x float*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8f32(<8 x float>, <8 x float*>, i32, <8 x i1>) + +declare void @llvm.masked.scatter.v1f64(<1 x double>, <1 x double*>, i32, <1 x i1>) +declare void @llvm.masked.scatter.v2f64(<2 x double>, <2 x double*>, i32, <2 x i1>) +declare void @llvm.masked.scatter.v4f64(<4 x double>, <4 x double*>, i32, <4 x i1>) +declare void @llvm.masked.scatter.v8f64(<8 x double>, <8 x double*>, 
i32, <8 x i1>) +declare void @llvm.masked.scatter.v32f64(<32 x double>, <32 x double*>, i32, <32 x i1>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll @@ -0,0 +1,331 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; i8 +define void @subvector_v4i8(<4 x i8> *%in, <4 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v4i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr s0, [x0] +; CHECK-NEXT: ptrue p0.h, vl4 +; CHECK-NEXT: uunpklo z0.h, z0.b +; CHECK-NEXT: st1b { z0.h }, p0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i8>, <4 x i8>* %in + br label %bb1 + +bb1: + store <4 x i8> %a, <4 x i8>* %out + ret void +} + +define void @subvector_v8i8(<8 x i8> *%in, <8 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v8i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <8 x i8>, <8 x i8>* %in + br label %bb1 + +bb1: + store <8 x i8> %a, <8 x i8>* %out + ret void +} + +define void @subvector_v16i8(<16 x i8> *%in, <16 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v16i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <16 x i8>, <16 x i8>* %in + br label %bb1 + +bb1: + store <16 x i8> %a, <16 x i8>* %out + ret void +} + +define void @subvector_v32i8(<32 x i8> *%in, <32 x i8>* %out) #0 { +; CHECK-LABEL: subvector_v32i8: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <32 x i8>, <32 x i8>* %in + br label %bb1 + +bb1: + store <32 x i8> %a, <32 x i8>* %out + ret void +} + +; i16 +define void @subvector_v2i16(<2 x i16> *%in, <2 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v2i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: sub sp, sp, #16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: ldrh w8, [x0, #2] +; CHECK-NEXT: ptrue p0.s, vl2 +; CHECK-NEXT: str w8, [sp, #12] +; CHECK-NEXT: ldrh w8, [x0] +; CHECK-NEXT: str w8, [sp, #8] +; CHECK-NEXT: ldr d0, [sp, #8] +; CHECK-NEXT: st1h { z0.s }, p0, [x1] +; CHECK-NEXT: add sp, sp, #16 +; CHECK-NEXT: ret + %a = load <2 x i16>, <2 x i16>* %in + br label %bb1 + +bb1: + store <2 x i16> %a, <2 x i16>* %out + ret void +} + +define void @subvector_v4i16(<4 x i16> *%in, <4 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v4i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i16>, <4 x i16>* %in + br label %bb1 + +bb1: + store <4 x i16> %a, <4 x i16>* %out + ret void +} + +define void @subvector_v8i16(<8 x i16> *%in, <8 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v8i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <8 x i16>, <8 x i16>* %in + br label %bb1 + +bb1: + store <8 x i16> %a, <8 x i16>* %out + ret void +} + +define void @subvector_v16i16(<16 x i16> *%in, <16 x i16>* %out) #0 { +; CHECK-LABEL: subvector_v16i16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <16 x i16>, <16 x i16>* %in + br label %bb1 + +bb1: + store <16 x i16> %a, <16 x i16>* %out + ret 
void +} + +; i32 +define void @subvector_v2i32(<2 x i32> *%in, <2 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v2i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <2 x i32>, <2 x i32>* %in + br label %bb1 + +bb1: + store <2 x i32> %a, <2 x i32>* %out + ret void +} + +define void @subvector_v4i32(<4 x i32> *%in, <4 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v4i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <4 x i32>, <4 x i32>* %in + br label %bb1 + +bb1: + store <4 x i32> %a, <4 x i32>* %out + ret void +} + +define void @subvector_v8i32(<8 x i32> *%in, <8 x i32>* %out) #0 { +; CHECK-LABEL: subvector_v8i32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <8 x i32>, <8 x i32>* %in + br label %bb1 + +bb1: + store <8 x i32> %a, <8 x i32>* %out + ret void +} + +; i64 +define void @subvector_v2i64(<2 x i64> *%in, <2 x i64>* %out) #0 { +; CHECK-LABEL: subvector_v2i64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <2 x i64>, <2 x i64>* %in + br label %bb1 + +bb1: + store <2 x i64> %a, <2 x i64>* %out + ret void +} + +define void @subvector_v4i64(<4 x i64> *%in, <4 x i64>* %out) #0 { +; CHECK-LABEL: subvector_v4i64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <4 x i64>, <4 x i64>* %in + br label %bb1 + +bb1: + store <4 x i64> %a, <4 x i64>* %out + ret void +} + +; f16 +define void @subvector_v2f16(<2 x half> *%in, <2 x half>* %out) #0 { +; CHECK-LABEL: subvector_v2f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr w8, [x0] +; CHECK-NEXT: str w8, [x1] +; CHECK-NEXT: ret + %a = load <2 x half>, <2 x half>* %in + br label %bb1 + +bb1: + store <2 x half> %a, <2 x half>* %out + ret void +} + +define void @subvector_v4f16(<4 x half> *%in, <4 x half>* %out) #0 { +; CHECK-LABEL: subvector_v4f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <4 x half>, <4 x half>* %in + br label %bb1 + +bb1: + store <4 x half> %a, <4 x half>* %out + ret void +} + +define void @subvector_v8f16(<8 x half> *%in, <8 x half>* %out) #0 { +; CHECK-LABEL: subvector_v8f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <8 x half>, <8 x half>* %in + br label %bb1 + +bb1: + store <8 x half> %a, <8 x half>* %out + ret void +} + +define void @subvector_v16f16(<16 x half> *%in, <16 x half>* %out) #0 { +; CHECK-LABEL: subvector_v16f16: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <16 x half>, <16 x half>* %in + br label %bb1 + +bb1: + store <16 x half> %a, <16 x half>* %out + ret void +} + +; f32 +define void @subvector_v2f32(<2 x float> *%in, <2 x float>* %out) #0 { +; CHECK-LABEL: subvector_v2f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr d0, [x0] +; CHECK-NEXT: str d0, [x1] +; CHECK-NEXT: ret + %a = load <2 x float>, <2 x float>* %in + br label %bb1 + +bb1: + store <2 x float> %a, <2 x float>* %out + ret void +} + +define void @subvector_v4f32(<4 x float> *%in, <4 x float>* %out) #0 { +; CHECK-LABEL: subvector_v4f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <4 x float>, <4 x float>* 
%in + br label %bb1 + +bb1: + store <4 x float> %a, <4 x float>* %out + ret void +} + +define void @subvector_v8f32(<8 x float> *%in, <8 x float>* %out) #0 { +; CHECK-LABEL: subvector_v8f32: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <8 x float>, <8 x float>* %in + br label %bb1 + +bb1: + store <8 x float> %a, <8 x float>* %out + ret void +} + +; f64 +define void @subvector_v2f64(<2 x double> *%in, <2 x double>* %out) #0 { +; CHECK-LABEL: subvector_v2f64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldr q0, [x0] +; CHECK-NEXT: str q0, [x1] +; CHECK-NEXT: ret + %a = load <2 x double>, <2 x double>* %in + br label %bb1 + +bb1: + store <2 x double> %a, <2 x double>* %out + ret void +} + +define void @subvector_v4f64(<4 x double> *%in, <4 x double>* %out) #0 { +; CHECK-LABEL: subvector_v4f64: +; CHECK: // %bb.0: // %bb1 +; CHECK-NEXT: ldp q0, q1, [x0] +; CHECK-NEXT: stp q0, q1, [x1] +; CHECK-NEXT: ret + %a = load <4 x double>, <4 x double>* %in + br label %bb1 + +bb1: + store <4 x double> %a, <4 x double>* %out + ret void +} + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll @@ -0,0 +1,473 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; +; truncate i16 -> i8 +; + +define <16 x i8> @trunc_v16i16_v16i8(<16 x i16>* %in) vscale_range(2,0) #0 { +; CHECK-LABEL: trunc_v16i16_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p0.h, vl16 +; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 +; CHECK-NEXT: ret + %a = load <16 x i16>, <16 x i16>* %in + %b = trunc <16 x i16> %a to <16 x i8> + ret <16 x i8> %b +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. +define void @trunc_v32i16_v32i8(<32 x i16>* %in, <32 x i8>* %out) #0 { +; CHECK-LABEL: trunc_v32i16_v32i8: +; CHECK: // %bb.0: +; CHECK-NEXT: ldp q0, q1, [x0, #32] +; CHECK-NEXT: ptrue p0.b, vl8 +; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b +; CHECK-NEXT: ldp q3, q2, [x0] +; CHECK-NEXT: uzp1 z1.b, z1.b, z1.b +; CHECK-NEXT: splice z0.b, p0, z0.b, z1.b +; CHECK-NEXT: add z0.b, z0.b, z0.b +; CHECK-NEXT: uzp1 z3.b, z3.b, z3.b +; CHECK-NEXT: uzp1 z2.b, z2.b, z2.b +; CHECK-NEXT: splice z3.b, p0, z3.b, z2.b +; CHECK-NEXT: add z1.b, z3.b, z3.b +; CHECK-NEXT: stp q1, q0, [x1] +; CHECK-NEXT: ret + %a = load <32 x i16>, <32 x i16>* %in + %b = trunc <32 x i16> %a to <32 x i8> + %c = add <32 x i8> %b, %b + store <32 x i8> %c, <32 x i8>* %out + ret void +} + +; NOTE: Extra 'add' is to prevent the truncate being combined with the store. 
+define void @trunc_v64i16_v64i8(<64 x i16>* %in, <64 x i8>* %out) vscale_range(8,0) #0 {
+; CHECK-LABEL: trunc_v64i16_v64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl64
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl64
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <64 x i16>, <64 x i16>* %in
+  %b = trunc <64 x i16> %a to <64 x i8>
+  %c = add <64 x i8> %b, %b
+  store <64 x i8> %c, <64 x i8>* %out
+  ret void
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) vscale_range(16,0) #0 {
+; CHECK-LABEL: trunc_v128i16_v128i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl128
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl128
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <128 x i16>, <128 x i16>* %in
+  %b = trunc <128 x i16> %a to <128 x i8>
+  %c = add <128 x i8> %b, %b
+  store <128 x i8> %c, <128 x i8>* %out
+  ret void
+}
+
+;
+; truncate i32 -> i8
+;
+
+define <8 x i8> @trunc_v8i32_v8i8(<8 x i32>* %in) vscale_range(2,0) #0 {
+; CHECK-LABEL: trunc_v8i32_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %in
+  %b = trunc <8 x i32> %a to <8 x i8>
+  ret <8 x i8> %b
+}
+
+define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %in) #0 {
+; CHECK-LABEL: trunc_v16i32_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p0, z3.h, z2.h
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    uzp1 z0.b, z3.b, z3.b
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <16 x i32>, <16 x i32>* %in
+  %b = trunc <16 x i32> %a to <16 x i8>
+  ret <16 x i8> %b
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v32i32_v32i8(<32 x i32>* %in, <32 x i8>* %out) vscale_range(8,0) #0 {
+; CHECK-LABEL: trunc_v32i32_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl32
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <32 x i32>, <32 x i32>* %in
+  %b = trunc <32 x i32> %a to <32 x i8>
+  %c = add <32 x i8> %b, %b
+  store <32 x i8> %c, <32 x i8>* %out
+  ret void
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) vscale_range(16,0) #0 {
+; CHECK-LABEL: trunc_v64i32_v64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl64
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl64
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <64 x i32>, <64 x i32>* %in
+  %b = trunc <64 x i32> %a to <64 x i8>
+  %c = add <64 x i8> %b, %b
+  store <64 x i8> %c, <64 x i8>* %out
+  ret void
+}
+
+;
+; truncate i32 -> i16
+;
+
+define <8 x i16> @trunc_v8i32_v8i16(<8 x i32>* %in) vscale_range(2,0) #0 {
+; CHECK-LABEL: trunc_v8i32_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %in
+  %b = trunc <8 x i32> %a to <8 x i16>
+  ret <8 x i16> %b
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v16i32_v16i16(<16 x i32>* %in, <16 x i16>* %out) #0 {
+; CHECK-LABEL: trunc_v16i32_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z3.h, z3.h, z3.h
+; CHECK-NEXT:    uzp1 z2.h, z2.h, z2.h
+; CHECK-NEXT:    splice z3.h, p0, z3.h, z2.h
+; CHECK-NEXT:    add z1.h, z3.h, z3.h
+; CHECK-NEXT:    stp q1, q0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <16 x i32>, <16 x i32>* %in
+  %b = trunc <16 x i32> %a to <16 x i16>
+  %c = add <16 x i16> %b, %b
+  store <16 x i16> %c, <16 x i16>* %out
+  ret void
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v32i32_v32i16(<32 x i32>* %in, <32 x i16>* %out) vscale_range(8,0) #0 {
+; CHECK-LABEL: trunc_v32i32_v32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl32
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl32
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <32 x i32>, <32 x i32>* %in
+  %b = trunc <32 x i32> %a to <32 x i16>
+  %c = add <32 x i16> %b, %b
+  store <32 x i16> %c, <32 x i16>* %out
+  ret void
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) vscale_range(16,0) #0 {
+; CHECK-LABEL: trunc_v64i32_v64i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl64
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl64
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <64 x i32>, <64 x i32>* %in
+  %b = trunc <64 x i32> %a to <64 x i16>
+  %c = add <64 x i16> %b, %b
+  store <64 x i16> %c, <64 x i16>* %out
+  ret void
+}
+
+;
+; truncate i64 -> i8
+;
+
+; NOTE: v4i8 is not legal so result i8 elements are held within i16 containers.
+define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) vscale_range(2,0) #0 {
+; CHECK-LABEL: trunc_v4i64_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <4 x i64>, <4 x i64>* %in
+  %b = trunc <4 x i64> %a to <4 x i8>
+  ret <4 x i8> %b
+}
+
+define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %in) #0 {
+; CHECK-LABEL: trunc_v8i64_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z2.s
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z1.h, z3.h, z3.h
+; CHECK-NEXT:    splice z1.h, p0, z1.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z1.b, z1.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <8 x i64>, <8 x i64>* %in
+  %b = trunc <8 x i64> %a to <8 x i8>
+  ret <8 x i8> %b
+}
+
+define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %in) vscale_range(8,0) #0 {
+; CHECK-LABEL: trunc_v16i64_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl16
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <16 x i64>, <16 x i64>* %in
+  %b = trunc <16 x i64> %a to <16 x i8>
+  ret <16 x i8> %b
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) vscale_range(16,0) #0 {
+; CHECK-LABEL: trunc_v32i64_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl32
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT:    add z0.b, z0.b, z0.b
+; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <32 x i64>, <32 x i64>* %in
+  %b = trunc <32 x i64> %a to <32 x i8>
+  %c = add <32 x i8> %b, %b
+  store <32 x i8> %c, <32 x i8>* %out
+  ret void
+}
+
+;
+; truncate i64 -> i16
+;
+
+define <4 x i16> @trunc_v4i64_v4i16(<4 x i64>* %in) vscale_range(2,0) #0 {
+; CHECK-LABEL: trunc_v4i64_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <4 x i64>, <4 x i64>* %in
+  %b = trunc <4 x i64> %a to <4 x i16>
+  ret <4 x i16> %b
+}
+
+define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %in) #0 {
+; CHECK-LABEL: trunc_v8i64_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z0.h, z0.h
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z2.s
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    uzp1 z0.h, z3.h, z3.h
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <8 x i64>, <8 x i64>* %in
+  %b = trunc <8 x i64> %a to <8 x i16>
+  ret <8 x i16> %b
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v16i64_v16i16(<16 x i64>* %in, <16 x i16>* %out) vscale_range(8,0) #0 {
+; CHECK-LABEL: trunc_v16i64_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl16
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <16 x i64>, <16 x i64>* %in
+  %b = trunc <16 x i64> %a to <16 x i16>
+  %c = add <16 x i16> %b, %b
+  store <16 x i16> %c, <16 x i16>* %out
+  ret void
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) vscale_range(16,0) #0 {
+; CHECK-LABEL: trunc_v32i64_v32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl32
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl32
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT:    add z0.h, z0.h, z0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <32 x i64>, <32 x i64>* %in
+  %b = trunc <32 x i64> %a to <32 x i16>
+  %c = add <32 x i16> %b, %b
+  store <32 x i16> %c, <32 x i16>* %out
+  ret void
+}
+
+;
+; truncate i64 -> i32
+;
+
+define <4 x i32> @trunc_v4i64_v4i32(<4 x i64>* %in) vscale_range(2,0) #0 {
+; CHECK-LABEL: trunc_v4i64_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %a = load <4 x i64>, <4 x i64>* %in
+  %b = trunc <4 x i64> %a to <4 x i32>
+  ret <4 x i32> %b
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v8i64_v8i32(<8 x i64>* %in, <8 x i32>* %out) #0 {
+; CHECK-LABEL: trunc_v8i64_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    ldp q3, q2, [x0]
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    add z0.s, z0.s, z0.s
+; CHECK-NEXT:    uzp1 z3.s, z3.s, z3.s
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z2.s
+; CHECK-NEXT:    splice z3.s, p0, z3.s, z2.s
+; CHECK-NEXT:    add z1.s, z3.s, z3.s
+; CHECK-NEXT:    stp q1, q0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <8 x i64>, <8 x i64>* %in
+  %b = trunc <8 x i64> %a to <8 x i32>
+  %c = add <8 x i32> %b, %b
+  store <8 x i32> %c, <8 x i32>* %out
+  ret void
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v16i64_v16i32(<16 x i64>* %in, <16 x i32>* %out) vscale_range(8,0) #0 {
+; CHECK-LABEL: trunc_v16i64_v16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl16
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl16
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    add z0.s, z0.s, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <16 x i64>, <16 x i64>* %in
+  %b = trunc <16 x i64> %a to <16 x i32>
+  %c = add <16 x i32> %b, %b
+  store <16 x i32> %c, <16 x i32>* %out
+  ret void
+}
+
+; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
+define void @trunc_v32i64_v32i32(<32 x i64>* %in, <32 x i32>* %out) vscale_range(16,0) #0 {
+; CHECK-LABEL: trunc_v32i64_v32i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl32
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl32
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT:    add z0.s, z0.s, z0.s
+; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
+  %a = load <32 x i64>, <32 x i64>* %in
+  %b = trunc <32 x i64> %a to <32 x i32>
+  %c = add <32 x i32> %b, %b
+  store <32 x i32> %c, <32 x i32>* %out
+  ret void
+}
+
+attributes #0 = { nounwind "target-features"="+sve" }
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-vector-shuffle.ll
@@ -0,0 +1,356 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; define <4 x i8> @shuffle_ext_byone_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
+;   %ret = shufflevector <4 x i8> %op1, <4 x i8> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+;   ret <4 x i8> %ret
+; }
+
+define <8 x i8> @shuffle_ext_byone_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    mov z0.b, z0.b[7]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.b, w8
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
+  %ret = shufflevector <8 x i8> %op1, <8 x i8> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
+  ret <8 x i8> %ret
+}
+
+define <16 x i8> @shuffle_ext_byone_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov z0.b, z0.b[15]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.b, w8
+; CHECK-NEXT:    orr q0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %ret = shufflevector <16 x i8> %op1, <16 x i8> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+  ret <16 x i8> %ret
+}
+
+define void @shuffle_ext_byone_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mov z0.b, z0.b[15]
+; CHECK-NEXT:    mov z2.b, z1.b[15]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    ldr q0, [x1, #16]
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    insr z1.b, w8
+; CHECK-NEXT:    insr z0.b, w9
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op2 = load <32 x i8>, <32 x i8>* %b
+  %ret = shufflevector <32 x i8> %op1, <32 x i8> %op2, <32 x i32> <i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62>
+  store <32 x i8> %ret, <32 x i8>* %a
+  ret void
+}
+
+; define <2 x i16> @shuffle_ext_byone_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 {
+;   %ret = shufflevector <2 x i16> %op1, <2 x i16> %op2, <2 x i32> <i32 1, i32 2>
+;   ret <2 x i16> %ret
+; }
+
+define <4 x i16> @shuffle_ext_byone_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    mov z0.h, z0.h[3]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.h, w8
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
+  %ret = shufflevector <4 x i16> %op1, <4 x i16> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+  ret <4 x i16> %ret
+}
+
+define <8 x i16> @shuffle_ext_byone_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.h, w8
+; CHECK-NEXT:    orr q0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %ret = shufflevector <8 x i16> %op1, <8 x i16> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
+  ret <8 x i16> %ret
+}
+
+define void @shuffle_ext_byone_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    mov z2.h, z1.h[7]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    ldr q0, [x1, #16]
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    insr z1.h, w8
+; CHECK-NEXT:    insr z0.h, w9
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op2 = load <16 x i16>, <16 x i16>* %b
+  %ret = shufflevector <16 x i16> %op1, <16 x i16> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+  store <16 x i16> %ret, <16 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @shuffle_ext_byone_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.s, w8
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
+  %ret = shufflevector <2 x i32> %op1, <2 x i32> %op2, <2 x i32> <i32 1, i32 2>
+  ret <2 x i32> %ret
+}
+
+define <4 x i32> @shuffle_ext_byone_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z1.s, w8
+; CHECK-NEXT:    orr q0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %ret = shufflevector <4 x i32> %op1, <4 x i32> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+  ret <4 x i32> %ret
+}
+
+define void @shuffle_ext_byone_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    mov z2.s, z1.s[3]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    ldr q0, [x1, #16]
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    insr z1.s, w8
+; CHECK-NEXT:    insr z0.s, w9
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op2 = load <8 x i32>, <8 x i32>* %b
+  %ret = shufflevector <8 x i32> %op1, <8 x i32> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
+  store <8 x i32> %ret, <8 x i32>* %a
+  ret void
+}
+
+define <2 x i64> @shuffle_ext_byone_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    insr z1.d, x8
+; CHECK-NEXT:    orr q0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %ret = shufflevector <2 x i64> %op1, <2 x i64> %op2, <2 x i32> <i32 1, i32 2>
+  ret <2 x i64> %ret
+}
+
+define void @shuffle_ext_byone_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    mov z2.d, z1.d[1]
+; CHECK-NEXT:    fmov x8, d0
+; CHECK-NEXT:    ldr q0, [x1, #16]
+; CHECK-NEXT:    fmov x9, d2
+; CHECK-NEXT:    insr z1.d, x8
+; CHECK-NEXT:    insr z0.d, x9
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op2 = load <4 x i64>, <4 x i64>* %b
+  %ret = shufflevector <4 x i64> %op1, <4 x i64> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+  store <4 x i64> %ret, <4 x i64>* %a
+  ret void
+}
+
+
+define <4 x half> @shuffle_ext_byone_v4f16(<4 x half> %op1, <4 x half> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    mov z0.h, z0.h[3]
+; CHECK-NEXT:    insr z1.h, h0
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
+  %ret = shufflevector <4 x half> %op1, <4 x half> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+  ret <4 x half> %ret
+}
+
+define <8 x half> @shuffle_ext_byone_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    insr z1.h, h0
+; CHECK-NEXT:    orr q0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %ret = shufflevector <8 x half> %op1, <8 x half> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
+  ret <8 x half> %ret
+}
+
+define void @shuffle_ext_byone_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q2, [x1]
+; CHECK-NEXT:    mov z3.h, z1.h[7]
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    insr z2.h, h3
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    insr z1.h, h0
+; CHECK-NEXT:    stp q1, q2, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x half>, <16 x half>* %a
+  %op2 = load <16 x half>, <16 x half>* %b
+  %ret = shufflevector <16 x half> %op1, <16 x half> %op2, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+  store <16 x half> %ret, <16 x half>* %a
+  ret void
+}
+
+define <2 x float> @shuffle_ext_byone_v2f32(<2 x float> %op1, <2 x float> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    insr z1.s, s0
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
+  %ret = shufflevector <2 x float> %op1, <2 x float> %op2, <2 x i32> <i32 1, i32 2>
+  ret <2 x float> %ret
+}
+
+define <4 x float> @shuffle_ext_byone_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    insr z1.s, s0
+; CHECK-NEXT:    orr q0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %ret = shufflevector <4 x float> %op1, <4 x float> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+  ret <4 x float> %ret
+}
+
+define void @shuffle_ext_byone_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q2, [x1]
+; CHECK-NEXT:    mov z3.s, z1.s[3]
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    insr z2.s, s3
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    insr z1.s, s0
+; CHECK-NEXT:    stp q1, q2, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x float>, <8 x float>* %a
+  %op2 = load <8 x float>, <8 x float>* %b
+  %ret = shufflevector <8 x float> %op1, <8 x float> %op2, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
+  store <8 x float> %ret, <8 x float>* %a
+  ret void
+}
+
+define <2 x double> @shuffle_ext_byone_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    insr z1.d, d0
+; CHECK-NEXT:    orr q0.d, z1.d, z0.d
+; CHECK-NEXT:    ret
+  %ret = shufflevector <2 x double> %op1, <2 x double> %op2, <2 x i32> <i32 1, i32 2>
+  ret <2 x double> %ret
+}
+
+define void @shuffle_ext_byone_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q2, [x1]
+; CHECK-NEXT:    mov z3.d, z1.d[1]
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    insr z2.d, d3
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    insr z1.d, d0
+; CHECK-NEXT:    stp q1, q2, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %op2 = load <4 x double>, <4 x double>* %b
+  %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+  store <4 x double> %ret, <4 x double>* %a
+  ret void
+}
+
+define void @shuffle_ext_byone_reverse(<4 x double>* %a, <4 x double>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_byone_reverse:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    mov z3.d, z1.d[1]
+; CHECK-NEXT:    ldr q0, [x1, #16]
+; CHECK-NEXT:    insr z2.d, d3
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    insr z1.d, d0
+; CHECK-NEXT:    stp q1, q2, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %op2 = load <4 x double>, <4 x double>* %b
+  %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 7, i32 0, i32 1, i32 2>
+  store <4 x double> %ret, <4 x double>* %a
+  ret void
+}
+
+define void @shuffle_ext_invalid(<4 x double>* %a, <4 x double>* %b) #0 {
+; CHECK-LABEL: shuffle_ext_invalid:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %op2 = load <4 x double>, <4 x double>* %b
+  %ret = shufflevector <4 x double> %op1, <4 x double> %op2, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+  store <4 x double> %ret, <4 x double>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }