diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -981,6 +981,7 @@
   SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                                SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerFixedLengthVectorMLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
   SDValue LowerPredReductionToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
   SDValue LowerReductionToSVE(unsigned Opcode, SDValue ScalarOp,
@@ -988,6 +989,8 @@
   SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerFixedLengthVectorMStoreToSVE(SDValue Op,
+                                            SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
   SDValue LowerFixedLengthExtractVectorElt(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1109,9 +1109,6 @@
   }
 
   if (Subtarget->hasSVE()) {
-    // FIXME: Add custom lowering of MLOAD to handle different passthrus (not a
-    // splat of 0 or undef) once vector selects supported in SVE codegen. See
-    // D68877 for more details.
     for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
       setOperationAction(ISD::BITREVERSE, VT, Custom);
       setOperationAction(ISD::BSWAP, VT, Custom);
@@ -1179,6 +1176,15 @@
       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
     }
+
+    // NEON doesn't support masked loads or stores, but SVE does
+    for (auto VT :
+         {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v1f64,
+          MVT::v2f64, MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
+          MVT::v2i32, MVT::v4i32, MVT::v1i64, MVT::v2i64}) {
+      setOperationAction(ISD::MLOAD, VT, Custom);
+      setOperationAction(ISD::MSTORE, VT, Custom);
+    }
   }
 
   for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
@@ -1461,6 +1467,8 @@
   setOperationAction(ISD::FSUB, VT, Custom);
   setOperationAction(ISD::FTRUNC, VT, Custom);
   setOperationAction(ISD::LOAD, VT, Custom);
+  setOperationAction(ISD::MLOAD, VT, Custom);
+  setOperationAction(ISD::MSTORE, VT, Custom);
   setOperationAction(ISD::MUL, VT, Custom);
   setOperationAction(ISD::MULHS, VT, Custom);
   setOperationAction(ISD::MULHU, VT, Custom);
@@ -4463,6 +4471,7 @@
   case ISD::ADDROFRETURNADDR:
     return LowerADDROFRETURNADDR(Op, DAG);
   case ISD::CONCAT_VECTORS:
+    return LowerCONCAT_VECTORS(Op, DAG);
   case ISD::INSERT_VECTOR_ELT:
     return LowerINSERT_VECTOR_ELT(Op, DAG);
@@ -4542,6 +4551,11 @@
     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::STORE:
     return LowerSTORE(Op, DAG);
+  case ISD::MSTORE:
+    if (useSVEForFixedLengthVectorVT(
+            cast<MaskedStoreSDNode>(Op)->getValue().getValueType(), true))
+      return LowerFixedLengthVectorMStoreToSVE(Op, DAG);
+    return SDValue();
   case ISD::MGATHER:
     return LowerMGATHER(Op, DAG);
   case ISD::MSCATTER:
@@ -4585,6 +4599,10 @@
   }
   case ISD::TRUNCATE:
     return LowerTRUNCATE(Op, DAG);
+  case ISD::MLOAD:
+    if (useSVEForFixedLengthVectorVT(Op.getValueType(), true))
+      return LowerFixedLengthVectorMLoadToSVE(Op, DAG);
+    return SDValue();
   case ISD::LOAD:
     if (useSVEForFixedLengthVectorVT(Op.getValueType()))
       return LowerFixedLengthVectorLoadToSVE(Op, DAG);
@@ -17185,6 +17203,65 @@
   return DAG.getMergeValues(MergedValues, DL);
 }
 
+static SDValue convertFixedMaskToScalableVector(SDValue Mask,
+                                                SelectionDAG &DAG) {
+  SDLoc DL(Mask);
+  EVT InVT = Mask.getValueType();
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
+
+  auto Op1 = convertToScalableVector(DAG, ContainerVT, Mask);
+  auto Op2 = DAG.getConstant(0, DL, ContainerVT);
+  auto Pg = getPredicateForFixedLengthVector(DAG, DL, InVT);
+
+  EVT CmpVT = Pg.getValueType();
+  auto Cmp = DAG.getNode(AArch64ISD::SETCC_MERGE_ZERO, DL, CmpVT,
+                         {Pg, Op1, Op2, DAG.getCondCode(ISD::SETNE)});
+
+  return Cmp;
+}
+
+// Convert all fixed length vector loads larger than NEON to masked_loads.
+SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
+    SDValue Op, SelectionDAG &DAG) const {
+  auto Load = cast<MaskedLoadSDNode>(Op);
+
+  SDLoc DL(Op);
+  EVT VT = Op.getValueType();
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+
+  SDValue Mask = convertFixedMaskToScalableVector(Load->getMask(), DAG);
+
+  SDValue PassThru;
+  bool IsPassThruZeroOrUndef = false;
+
+  if (Load->getPassThru()->isUndef()) {
+    PassThru = DAG.getUNDEF(ContainerVT);
+    IsPassThruZeroOrUndef = true;
+  } else {
+    if (ContainerVT.isInteger())
+      PassThru = DAG.getConstant(0, DL, ContainerVT);
+    else
+      PassThru = DAG.getConstantFP(0, DL, ContainerVT);
+    if (isZerosVector(Load->getPassThru().getNode()))
+      IsPassThruZeroOrUndef = true;
+  }
+
+  auto NewLoad = DAG.getMaskedLoad(
+      ContainerVT, DL, Load->getChain(), Load->getBasePtr(), Load->getOffset(),
+      Mask, PassThru, Load->getMemoryVT(), Load->getMemOperand(),
+      Load->getAddressingMode(), Load->getExtensionType());
+
+  if (!IsPassThruZeroOrUndef) {
+    SDValue OldPassThru =
+        convertToScalableVector(DAG, ContainerVT, Load->getPassThru());
+    NewLoad = DAG.getSelect(DL, ContainerVT, Mask, NewLoad, OldPassThru);
+  }
+
+  auto Result = convertFromScalableVector(DAG, VT, NewLoad);
+  SDValue MergedValues[2] = {Result, Load->getChain()};
+  return DAG.getMergeValues(MergedValues, DL);
+}
+
 // Convert all fixed length vector stores larger than NEON to masked_stores.
 SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
     SDValue Op, SelectionDAG &DAG) const {
@@ -17202,6 +17279,23 @@
                             Store->isTruncatingStore());
 }
 
+SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
+    SDValue Op, SelectionDAG &DAG) const {
+  auto Store = cast<MaskedStoreSDNode>(Op);
+
+  SDLoc DL(Op);
+  EVT VT = Store->getValue().getValueType();
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+
+  auto NewValue = convertToScalableVector(DAG, ContainerVT, Store->getValue());
+  SDValue Mask = convertFixedMaskToScalableVector(Store->getMask(), DAG);
+
+  return DAG.getMaskedStore(
+      Store->getChain(), DL, NewValue, Store->getBasePtr(), Store->getOffset(),
+      Mask, Store->getMemoryVT(), Store->getMemOperand(),
+      Store->getAddressingMode(), Store->isTruncatingStore());
+}
+
 SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
     SDValue Op, SelectionDAG &DAG) const {
   SDLoc dl(Op);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -224,9 +224,13 @@
   }
 
   bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
-    if (isa<FixedVectorType>(DataType) || !ST->hasSVE())
+    if (!ST->hasSVE())
       return false;
 
+    // For fixed vectors, avoid scalarization if using SVE for them.
+    if (isa<FixedVectorType>(DataType) && !ST->useSVEForFixedLengthVectors())
+      return false; // Fall back to scalarization of masked operations.
+
     return isLegalElementTypeForSVE(DataType->getScalarType());
   }
 
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
@@ -0,0 +1,399 @@
+; RUN: llc -aarch64-sve-vector-bits-min=128 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=384 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=640 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=768 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=896 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256
+; RUN: llc
-aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 + +target triple = "aarch64-unknown-linux-gnu" + +; Don't use SVE when its registers are no bigger than NEON. +; NO_SVE-NOT: ptrue + +; +; Masked Loads +; + +define <2 x float> @masked_load_v2f32(<2 x float>* %ap, <2 x float>* %bp) #0 { +; CHECK-LABEL: masked_load_v2f32: +; CHECK: ptrue p{{[0-9]+}}.s, vl2 +; CHECK: fcmeq v[[N:[0-9]+]].2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s +; CHECK: ld1w { z0.s }, p[[N]]/z, [x0] +; CHECK: ret + %a = load <2 x float>, <2 x float>* %ap + %b = load <2 x float>, <2 x float>* %bp + %mask = fcmp oeq <2 x float> %a, %b + %load = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %ap, i32 8, <2 x i1> %mask, <2 x float> zeroinitializer) + ret <2 x float> %load +} + +define <4 x float> @masked_load_v4f32(<4 x float>* %ap, <4 x float>* %bp) #0 { +; CHECK-LABEL: masked_load_v4f32: +; CHECK: ptrue p{{[0-9]+}}.s, vl4 +; CHECK: fcmeq v[[N:[0-9]+]].4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s +; CHECK: ld1w { z0.s }, p[[N]]/z, [x0] +; CHECK: ret + %a = load <4 x float>, <4 x float>* %ap + %b = load <4 x float>, <4 x float>* %bp + %mask = fcmp oeq <4 x float> %a, %b + %load = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %ap, i32 8, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load +} + +define <8 x float> @masked_load_v8f32(<8 x float>* %ap, <8 x float>* %bp) #0 { +; CHECK-LABEL: masked_load_v8f32: +; VBITS_GE_256: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]] +; VBITS_GE_256: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0:z[0-9]+]].s, [[Z1:z[0-9]+]].s +; VBITS_GE_256: ld1w { [[Z0]].s }, [[PG1]]/z, [x0] +; VBITS_GE_256: ret + %a = load <8 x float>, <8 x float>* %ap + %b = load <8 x float>, <8 x float>* %bp + %mask = fcmp oeq <8 x float> %a, %b + %load = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %ap, i32 8, <8 x i1> %mask, <8 x float> zeroinitializer) + ret <8 x float> %load +} + +define <16 x float> @masked_load_v16f32(<16 x float>* %ap, <16 x float>* %bp) #0 { +; CHECK-LABEL: masked_load_v16f32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]] +; VBITS_GE_512: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0:z[0-9]+]].s, z{{[0-9]+}}.s +; VBITS_GE_512: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: ret + %a = load <16 x float>, <16 x float>* %ap + %b = load <16 x float>, <16 x float>* %bp + %mask = fcmp oeq <16 x float> %a, %b + %load = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %ap, i32 8, <16 x i1> %mask, <16 x float> zeroinitializer) + ret <16 x float> %load +} + +define <32 x float> @masked_load_v32f32(<32 x float>* %ap, <32 x float>* %bp) #0 { +; CHECK-LABEL: masked_load_v32f32: +; VBITS_GE_1024: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]] +; VBITS_GE_1024: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0:z[0-9]+]].s, z{{[0-9]+}}.s +; VBITS_GE_1024: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_1024: ret + %a = load <32 x float>, <32 x float>* %ap + %b = load <32 x float>, <32 x float>* %bp + %mask = fcmp oeq <32 x float> %a, %b + %load = call <32 x float> @llvm.masked.load.v32f32(<32 x float>* %ap, i32 8, <32 x i1> %mask, <32 x float> zeroinitializer) + ret <32 x 
float> %load +} + +define <64 x float> @masked_load_v64f32(<64 x float>* %ap, <64 x float>* %bp) #0 { +; CHECK-LABEL: masked_load_v64f32: +; VBITS_GE_2048: ptrue [[PG0:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]] +; VBITS_GE_2048: fcmeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0:z[0-9]+]].s, z{{[0-9]+}}.s +; VBITS_GE_2048: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_2048: ret + + %a = load <64 x float>, <64 x float>* %ap + %b = load <64 x float>, <64 x float>* %bp + %mask = fcmp oeq <64 x float> %a, %b + %load = call <64 x float> @llvm.masked.load.v64f32(<64 x float>* %ap, i32 8, <64 x i1> %mask, <64 x float> zeroinitializer) + ret <64 x float> %load +} + +define <64 x i8> @masked_load_v64i8(<64 x i8>* %ap, <64 x i8>* %bp) #0 { +; CHECK-LABEL: masked_load_v64i8: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl64 +; VBITS_GE_512: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0:z[0-9]+]].b, z{{[0-9]+}}.b +; VBITS_GE_512: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: ret + %a = load <64 x i8>, <64 x i8>* %ap + %b = load <64 x i8>, <64 x i8>* %bp + %mask = icmp eq <64 x i8> %a, %b + %load = call <64 x i8> @llvm.masked.load.v64i8(<64 x i8>* %ap, i32 8, <64 x i1> %mask, <64 x i8> undef) + ret <64 x i8> %load +} + +define <32 x i16> @masked_load_v32i16(<32 x i16>* %ap, <32 x i16>* %bp) #0 { +; CHECK-LABEL: masked_load_v32i16: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl32 +; VBITS_GE_512: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0:z[0-9]+]].h, z{{[0-9]+}}.h +; VBITS_GE_512: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: ret + %a = load <32 x i16>, <32 x i16>* %ap + %b = load <32 x i16>, <32 x i16>* %bp + %mask = icmp eq <32 x i16> %a, %b + %load = call <32 x i16> @llvm.masked.load.v32i16(<32 x i16>* %ap, i32 8, <32 x i1> %mask, <32 x i16> undef) + ret <32 x i16> %load +} + +define <16 x i32> @masked_load_v16i32(<16 x i32>* %ap, <16 x i32>* %bp) #0 { +; CHECK-LABEL: masked_load_v16i32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16 +; VBITS_GE_512: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0:z[0-9]+]].s, z{{[0-9]+}}.s +; VBITS_GE_512: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: ret + %a = load <16 x i32>, <16 x i32>* %ap + %b = load <16 x i32>, <16 x i32>* %bp + %mask = icmp eq <16 x i32> %a, %b + %load = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %ap, i32 8, <16 x i1> %mask, <16 x i32> undef) + ret <16 x i32> %load +} + +define <8 x i64> @masked_load_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0 { +; CHECK-LABEL: masked_load_v8i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0:z[0-9]+]].d, z{{[0-9]+}}.d +; VBITS_GE_512: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: ret + %a = load <8 x i64>, <8 x i64>* %ap + %b = load <8 x i64>, <8 x i64>* %bp + %mask = icmp eq <8 x i64> %a, %b + %load = call <8 x i64> @llvm.masked.load.v8i64(<8 x i64>* %ap, i32 8, <8 x i1> %mask, <8 x i64> undef) + ret <8 x i64> %load +} + +define <8 x i64> @masked_load_passthru_v8i64(<8 x i64>* %ap, <8 x i64>* %bp) #0 { +; CHECK-LABEL: masked_load_passthru_v8i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0:z[0-9]+]].d, [[Z1:z[0-9]+]].d +; VBITS_GE_512: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: sel z{{[0-9]+}}.d, [[PG1]], [[Z0]].d, [[Z1]].d +; VBITS_GE_512: ret + %a = load <8 x i64>, <8 x i64>* %ap + %b = load <8 x i64>, <8 x i64>* %bp + %mask = icmp eq <8 x i64> %a, %b + %load = call <8 x i64> @llvm.masked.load.v8i64(<8 x i64>* %ap, i32 8, <8 
x i1> %mask, <8 x i64> %b) + ret <8 x i64> %load +} + +define <8 x double> @masked_load_passthru_v8f64(<8 x double>* %ap, <8 x double>* %bp) #0 { +; CHECK-LABEL: masked_load_passthru_v8f64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8 +; VBITS_GE_512: fcmeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0:z[0-9]+]].d, [[Z1:z[0-9]+]].d +; VBITS_GE_512: ld1d { [[Z0]].d }, [[PG1]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: sel z{{[0-9]+}}.d, [[PG1]], [[Z0]].d, [[Z1]].d +; VBITS_GE_512: ret + %a = load <8 x double>, <8 x double>* %ap + %b = load <8 x double>, <8 x double>* %bp + %mask = fcmp oeq <8 x double> %a, %b + %load = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %ap, i32 8, <8 x i1> %mask, <8 x double> %b) + ret <8 x double> %load +} + +define <32 x i16> @masked_load_sext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0 { +; CHECK-LABEL: masked_load_sext_v32i8i16: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl32 +; VBITS_GE_512: cmpeq p{{[0-9]+}}.b, [[PG0]]/z, [[Z0:z[0-9]+]].b, z{{[0-9]+}}.b +; VBITS_GE_512: ld1b { [[Z0]].b }, p{{[0-9]+}}/z, [x{{[0-9]+}}] +; VBITS_GE_512: sunpklo [[Z0]].h, [[Z0]].b +; VBITS_GE_512: ret + %a = load <32 x i8>, <32 x i8>* %ap + %b = load <32 x i8>, <32 x i8>* %bp + %mask = icmp eq <32 x i8> %a, %b + %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef) + %ext = sext <32 x i8> %load to <32 x i16> + ret <32 x i16> %ext +} + +define <16 x i32> @masked_load_sext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0 { +; CHECK-LABEL: masked_load_sext_v16i8i32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl16 +; VBITS_GE_512: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b +; VBITS_GE_512: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0 +; VBITS_GE_512: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: sunpklo [[Z0]].h, [[Z0]].b +; VBITS_GE_512: sunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: ret + %a = load <16 x i8>, <16 x i8>* %ap + %b = load <16 x i8>, <16 x i8>* %bp + %mask = icmp eq <16 x i8> %a, %b + %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef) + %ext = sext <16 x i8> %load to <16 x i32> + ret <16 x i32> %ext +} + +define <8 x i64> @masked_load_sext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 { +; CHECK-LABEL: masked_load_sext_v8i8i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl8 +; VBITS_GE_512: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b +; VBITS_GE_512: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0 +; VBITS_GE_512: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: sunpklo [[Z0]].h, [[Z0]].b +; VBITS_GE_512: sunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: sunpklo [[Z0]].d, [[Z0]].s +; VBITS_GE_512: ret + %a = load <8 x i8>, <8 x i8>* %ap + %b = load <8 x i8>, <8 x i8>* %bp + %mask = icmp eq <8 x i8> %a, %b + %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef) + %ext = sext <8 x i8> %load to <8 x i64> + ret <8 x i64> %ext +} + +define <16 x i32> @masked_load_sext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp) #0 { +; CHECK-LABEL: masked_load_sext_v16i16i32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl16 +; VBITS_GE_512: cmpeq p{{[0-9]+}}.h, [[PG0]]/z, [[Z0:z[0-9]+]].h, z{{[0-9]+}}.h +; VBITS_GE_512: ld1h { [[Z0]].h }, p{{[0-9]+}}/z, [x{{[0-9]+}}] +; VBITS_GE_512: sunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: ret + %a = load <16 x i16>, <16 x i16>* %ap + %b = load <16 x i16>, <16 x i16>* %bp + %mask = icmp eq <16 x i16> %a, %b + %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> 
%mask, <16 x i16> undef) + %ext = sext <16 x i16> %load to <16 x i32> + ret <16 x i32> %ext +} + +define <8 x i64> @masked_load_sext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 { +; CHECK-LABEL: masked_load_sext_v8i16i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl8 +; VBITS_GE_512: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h +; VBITS_GE_512: cmpne p[[PG:[0-9]+]].h, p0/z, z[[V]].h, #0 +; VBITS_GE_512: ld1h { [[Z0]].h }, p[[PG]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: sunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: sunpklo [[Z0]].d, [[Z0]].s +; VBITS_GE_512: ret + %a = load <8 x i16>, <8 x i16>* %ap + %b = load <8 x i16>, <8 x i16>* %bp + %mask = icmp eq <8 x i16> %a, %b + %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef) + %ext = sext <8 x i16> %load to <8 x i64> + ret <8 x i64> %ext +} + +define <8 x i64> @masked_load_sext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 { +; CHECK-LABEL: masked_load_sext_v8i32i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8 +; VBITS_GE_512: cmpeq p{{[0-9]+}}.s, [[PG0]]/z, [[Z0:z[0-9]+]].s, z{{[0-9]+}}.s +; VBITS_GE_512: ld1w { [[Z0]].s }, p{{[0-9]+}}/z, [x{{[0-9]+}}] +; VBITS_GE_512: sunpklo [[Z0]].d, [[Z0]].s +; VBITS_GE_512: ret + %a = load <8 x i32>, <8 x i32>* %ap + %b = load <8 x i32>, <8 x i32>* %bp + %mask = icmp eq <8 x i32> %a, %b + %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef) + %ext = sext <8 x i32> %load to <8 x i64> + ret <8 x i64> %ext +} + +define <32 x i16> @masked_load_zext_v32i8i16(<32 x i8>* %ap, <32 x i8>* %bp) #0 { +; CHECK-LABEL: masked_load_zext_v32i8i16: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl32 +; VBITS_GE_512: cmpeq p{{[0-9]+}}.b, [[PG0]]/z, [[Z0:z[0-9]+]].b, z{{[0-9]+}}.b +; VBITS_GE_512: ld1b { [[Z0]].b }, p{{[0-9]+}}/z, [x{{[0-9]+}}] +; VBITS_GE_512: uunpklo [[Z0]].h, [[Z0]].b +; VBITS_GE_512: ret + %a = load <32 x i8>, <32 x i8>* %ap + %b = load <32 x i8>, <32 x i8>* %bp + %mask = icmp eq <32 x i8> %a, %b + %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef) + %ext = zext <32 x i8> %load to <32 x i16> + ret <32 x i16> %ext +} + +define <16 x i32> @masked_load_zext_v16i8i32(<16 x i8>* %ap, <16 x i8>* %bp) #0 { +; CHECK-LABEL: masked_load_zext_v16i8i32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl16 +; VBITS_GE_512: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b +; VBITS_GE_512: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0 +; VBITS_GE_512: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: uunpklo [[Z0]].h, [[Z0]].b +; VBITS_GE_512: uunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: ret + %a = load <16 x i8>, <16 x i8>* %ap + %b = load <16 x i8>, <16 x i8>* %bp + %mask = icmp eq <16 x i8> %a, %b + %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef) + %ext = zext <16 x i8> %load to <16 x i32> + ret <16 x i32> %ext +} + +define <8 x i64> @masked_load_zext_v8i8i64(<8 x i8>* %ap, <8 x i8>* %bp) #0 { +; CHECK-LABEL: masked_load_zext_v8i8i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].b, vl8 +; VBITS_GE_512: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b +; VBITS_GE_512: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0 +; VBITS_GE_512: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: uunpklo [[Z0]].h, [[Z0]].b +; VBITS_GE_512: uunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: uunpklo [[Z0]].d, [[Z0]].s +; VBITS_GE_512: ret + %a = load <8 x i8>, <8 x i8>* %ap + %b = load <8 x i8>, <8 x i8>* %bp + %mask = 
icmp eq <8 x i8> %a, %b + %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef) + %ext = zext <8 x i8> %load to <8 x i64> + ret <8 x i64> %ext +} + +define <16 x i32> @masked_load_zext_v16i16i32(<16 x i16>* %ap, <16 x i16>* %bp) #0 { +; CHECK-LABEL: masked_load_zext_v16i16i32: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl16 +; VBITS_GE_512: cmpeq p{{[0-9]+}}.h, [[PG0]]/z, [[Z0:z[0-9]+]].h, z{{[0-9]+}}.h +; VBITS_GE_512: ld1h { [[Z0]].h }, p{{[0-9]+}}/z, [x{{[0-9]+}}] +; VBITS_GE_512: uunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: ret + %a = load <16 x i16>, <16 x i16>* %ap + %b = load <16 x i16>, <16 x i16>* %bp + %mask = icmp eq <16 x i16> %a, %b + %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef) + %ext = zext <16 x i16> %load to <16 x i32> + ret <16 x i32> %ext +} + +define <8 x i64> @masked_load_zext_v8i16i64(<8 x i16>* %ap, <8 x i16>* %bp) #0 { +; CHECK-LABEL: masked_load_zext_v8i16i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl8 +; VBITS_GE_512: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h +; VBITS_GE_512: cmpne p[[PG:[0-9]+]].h, p0/z, z[[V]].h, #0 +; VBITS_GE_512: ld1h { [[Z0]].h }, p[[PG]]/z, [x{{[0-9]+}}] +; VBITS_GE_512: uunpklo [[Z0]].s, [[Z0]].h +; VBITS_GE_512: uunpklo [[Z0]].d, [[Z0]].s +; VBITS_GE_512: ret + %a = load <8 x i16>, <8 x i16>* %ap + %b = load <8 x i16>, <8 x i16>* %bp + %mask = icmp eq <8 x i16> %a, %b + %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef) + %ext = zext <8 x i16> %load to <8 x i64> + ret <8 x i64> %ext +} + +define <8 x i64> @masked_load_zext_v8i32i64(<8 x i32>* %ap, <8 x i32>* %bp) #0 { +; CHECK-LABEL: masked_load_zext_v8i32i64: +; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl8 +; VBITS_GE_512: cmpeq p{{[0-9]+}}.s, [[PG0]]/z, [[Z0:z[0-9]+]].s, z{{[0-9]+}}.s +; VBITS_GE_512: ld1w { [[Z0]].s }, p{{[0-9]+}}/z, [x{{[0-9]+}}] +; VBITS_GE_512: uunpklo [[Z0]].d, [[Z0]].s +; VBITS_GE_512: ret + %a = load <8 x i32>, <8 x i32>* %ap + %b = load <8 x i32>, <8 x i32>* %bp + %mask = icmp eq <8 x i32> %a, %b + %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef) + %ext = zext <8 x i32> %load to <8 x i64> + ret <8 x i64> %ext +} + +declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>) +declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>) +declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>) +declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>) +declare <32 x float> @llvm.masked.load.v32f32(<32 x float>*, i32, <32 x i1>, <32 x float>) +declare <64 x float> @llvm.masked.load.v64f32(<64 x float>*, i32, <64 x i1>, <64 x float>) + +declare <64 x i8> @llvm.masked.load.v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>) +declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>) +declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>) +declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>) +declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>) +declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>) +declare <8 x i32> @llvm.masked.load.v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>) +declare <32 x i16> @llvm.masked.load.v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>) +declare <16 x i32> @llvm.masked.load.v16i32(<16 x 
i32>*, i32, <16 x i1>, <16 x i32>) +declare <8 x i64> @llvm.masked.load.v8i64(<8 x i64>*, i32, <8 x i1>, <8 x i64>) +declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>) + +attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-stores.ll @@ -0,0 +1,234 @@ +; RUN: llc -aarch64-sve-vector-bits-min=128 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE +; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=384 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=640 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=768 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=896 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 +; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_2048,VBITS_GE_1024,VBITS_GE_512,VBITS_GE_256 + +target triple = "aarch64-unknown-linux-gnu" + +; Don't use SVE when its registers are no bigger than NEON. 
+; NO_SVE-NOT: ptrue + +;; +;; Masked Stores +;; +define void @masked_store_v2f32(<2 x float>* %ap, <2 x float>* %bp) #0 { +; CHECK-LABEL: masked_store_v2f32: +; CHECK: ptrue p[[P0:[0-9]+]].s, vl2 +; CHECK: fcmeq v[[P1:[0-9]+]].2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s +; CHECK: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0 +; CHECK: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}] +; CHECK-NEXT: ret + %a = load <2 x float>, <2 x float>* %ap + %b = load <2 x float>, <2 x float>* %bp + %mask = fcmp oeq <2 x float> %a, %b + call void @llvm.masked.store.v2f32(<2 x float> %a, <2 x float>* %bp, i32 8, <2 x i1> %mask) + ret void +} + +define void @masked_store_v4f32(<4 x float>* %ap, <4 x float>* %bp) #0 { +; CHECK-LABEL: masked_store_v4f32: +; CHECK: ptrue p[[P0:[0-9]+]].s, vl4 +; CHECK: fcmeq v[[P1:[0-9]+]].4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s +; CHECK: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0 +; CHECK: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}] +; CHECK-NEXT: ret + %a = load <4 x float>, <4 x float>* %ap + %b = load <4 x float>, <4 x float>* %bp + %mask = fcmp oeq <4 x float> %a, %b + call void @llvm.masked.store.v4f32(<4 x float> %a, <4 x float>* %bp, i32 8, <4 x i1> %mask) + ret void +} + +define void @masked_store_v8f32(<8 x float>* %ap, <8 x float>* %bp) #0 { +; CHECK-LABEL: masked_store_v8f32: +; VBITS_GE_256: ptrue p[[P0:[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]] +; VBITS_GE_256: fcmeq p[[P1:[0-9]+]].s, p[[P0]]/z, z{{[0-9]+}}.s, z{{[0-9]+}}.s +; VBITS_GE_256: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0 +; VBITS_GE_256: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_256-NEXT: ret + %a = load <8 x float>, <8 x float>* %ap + %b = load <8 x float>, <8 x float>* %bp + %mask = fcmp oeq <8 x float> %a, %b + call void @llvm.masked.store.v8f32(<8 x float> %a, <8 x float>* %bp, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_store_v16f32(<16 x float>* %ap, <16 x float>* %bp) #0 { +; CHECK-LABEL: masked_store_v16f32: +; VBITS_GE_512: ptrue p[[P0:[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]] +; VBITS_GE_512: fcmeq p[[P1:[0-9]+]].s, p[[P0]]/z, z{{[0-9]+}}.s, z{{[0-9]+}}.s +; VBITS_GE_512: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0 +; VBITS_GE_512: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ret + %a = load <16 x float>, <16 x float>* %ap + %b = load <16 x float>, <16 x float>* %bp + %mask = fcmp oeq <16 x float> %a, %b + call void @llvm.masked.store.v16f32(<16 x float> %a, <16 x float>* %ap, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_store_v32f32(<32 x float>* %ap, <32 x float>* %bp) #0 { +; CHECK-LABEL: masked_store_v32f32: +; VBITS_GE_1024: ptrue p[[P0:[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]] +; VBITS_GE_1024: fcmeq p[[P1:[0-9]+]].s, p[[P0]]/z, z{{[0-9]+}}.s, z{{[0-9]+}}.s +; VBITS_GE_1024: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0 +; VBITS_GE_1024: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_1024-NEXT: ret + %a = load <32 x float>, <32 x float>* %ap + %b = load <32 x float>, <32 x float>* %bp + %mask = fcmp oeq <32 x float> %a, %b + call void @llvm.masked.store.v32f32(<32 x float> %a, <32 x float>* %ap, i32 8, <32 x i1> %mask) + ret void +} + +define void @masked_store_v64f32(<64 x float>* %ap, <64 x float>* %bp) #0 { +; CHECK-LABEL: masked_store_v64f32: +; VBITS_GE_2048: ptrue p[[P0:[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]] +; VBITS_GE_2048: fcmeq p[[P1:[0-9]+]].s, p[[P0]]/z, z{{[0-9]+}}.s, z{{[0-9]+}}.s +; VBITS_GE_2048: cmpne p[[P2:[0-9]+]].s, p[[P0]]/z, z[[P1]].s, #0 +; VBITS_GE_2048: st1w { z0.s }, p[[P2]], [x{{[0-9]+}}] +; 
VBITS_GE_2048-NEXT: ret + %a = load <64 x float>, <64 x float>* %ap + %b = load <64 x float>, <64 x float>* %bp + %mask = fcmp oeq <64 x float> %a, %b + call void @llvm.masked.store.v64f32(<64 x float> %a, <64 x float>* %ap, i32 8, <64 x i1> %mask) + ret void +} + +define void @masked_store_trunc_v8i64i8(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i8>* %dest) #0 { +; CHECK-LABEL: masked_store_trunc_v8i64i8: +; VBITS_GE_512: ptrue p[[P0:[0-9]+]].d, vl8 +; VBITS_GE_512: cmpeq p[[P1:[0-9]+]].d, p[[P0]]/z, [[Z0:z[0-9]+]].d, [[Z1:z[0-9]+]].d +; VBITS_GE_512-DAG: uzp1 [[Z1]].s, [[Z1]].s, [[Z1]].s +; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h +; VBITS_GE_512-DAG: uzp1 [[Z1]].b, [[Z1]].b, [[Z1]].b +; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].b, p{{[0-9]+}}/z, [[Z1]].b, #0 +; VBITS_GE_512-DAG: uzp1 [[Z0]].s, [[Z0]].s, [[Z0]].s +; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h +; VBITS_GE_512-DAG: uzp1 [[Z0]].b, [[Z0]].b, [[Z0]].b +; VBITS_GE_512: st1b { [[Z0]].b }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ret + %a = load <8 x i64>, <8 x i64>* %ap + %b = load <8 x i64>, <8 x i64>* %bp + %mask = icmp eq <8 x i64> %a, %b + %val = trunc <8 x i64> %a to <8 x i8> + call void @llvm.masked.store.v8i8(<8 x i8> %val, <8 x i8>* %dest, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_store_trunc_v8i64i16(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i16>* %dest) #0 { +; CHECK-LABEL: masked_store_trunc_v8i64i16: +; VBITS_GE_512: ptrue p[[P0:[0-9]+]].d, vl8 +; VBITS_GE_512: cmpeq p[[P1:[0-9]+]].d, p[[P0]]/z, [[Z0:z[0-9]+]].d, [[Z1:z[0-9]+]].d +; VBITS_GE_512-DAG: uzp1 [[Z1]].s, [[Z1]].s, [[Z1]].s +; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h +; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].h, p{{[0-9]+}}/z, [[Z1]].h, #0 +; VBITS_GE_512-DAG: uzp1 [[Z0]].s, [[Z0]].s, [[Z0]].s +; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h +; VBITS_GE_512: st1h { [[Z0]].h }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ret + %a = load <8 x i64>, <8 x i64>* %ap + %b = load <8 x i64>, <8 x i64>* %bp + %mask = icmp eq <8 x i64> %a, %b + %val = trunc <8 x i64> %a to <8 x i16> + call void @llvm.masked.store.v8i16(<8 x i16> %val, <8 x i16>* %dest, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_store_trunc_v8i64i32(<8 x i64>* %ap, <8 x i64>* %bp, <8 x i32>* %dest) #0 { +; CHECK-LABEL: masked_store_trunc_v8i64i32: +; VBITS_GE_512: ptrue p[[P0:[0-9]+]].d, vl8 +; VBITS_GE_512: cmpeq p[[P1:[0-9]+]].d, p[[P0]]/z, [[Z0:z[0-9]+]].d, [[Z1:z[0-9]+]].d +; VBITS_GE_512-DAG: uzp1 [[Z1]].s, [[Z1]].s, [[Z1]].s +; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].s, p{{[0-9]+}}/z, [[Z1]].s, #0 +; VBITS_GE_512-DAG: uzp1 [[Z0]].s, [[Z0]].s, [[Z0]].s +; VBITS_GE_512: st1w { [[Z0]].s }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ret + %a = load <8 x i64>, <8 x i64>* %ap + %b = load <8 x i64>, <8 x i64>* %bp + %mask = icmp eq <8 x i64> %a, %b + %val = trunc <8 x i64> %a to <8 x i32> + call void @llvm.masked.store.v8i32(<8 x i32> %val, <8 x i32>* %dest, i32 8, <8 x i1> %mask) + ret void +} + +define void @masked_store_trunc_v16i32i8(<16 x i32>* %ap, <16 x i32>* %bp, <16 x i8>* %dest) #0 { +; CHECK-LABEL: masked_store_trunc_v16i32i8: +; VBITS_GE_512: ptrue p[[P0:[0-9]+]].s, vl16 +; VBITS_GE_512: cmpeq p[[P1:[0-9]+]].s, p[[P0]]/z, [[Z0:z[0-9]+]].s, [[Z1:z[0-9]+]].s +; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h +; VBITS_GE_512-DAG: uzp1 [[Z1]].b, [[Z1]].b, [[Z1]].b +; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].b, p{{[0-9]+}}/z, [[Z1]].b, #0 +; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h +; VBITS_GE_512-DAG: uzp1 [[Z0]].b, 
[[Z0]].b, [[Z0]].b +; VBITS_GE_512: st1b { [[Z0]].b }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ret + %a = load <16 x i32>, <16 x i32>* %ap + %b = load <16 x i32>, <16 x i32>* %bp + %mask = icmp eq <16 x i32> %a, %b + %val = trunc <16 x i32> %a to <16 x i8> + call void @llvm.masked.store.v16i8(<16 x i8> %val, <16 x i8>* %dest, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_store_trunc_v16i32i16(<16 x i32>* %ap, <16 x i32>* %bp, <16 x i16>* %dest) #0 { +; CHECK-LABEL: masked_store_trunc_v16i32i16: +; VBITS_GE_512: ptrue p[[P0:[0-9]+]].s, vl16 +; VBITS_GE_512: cmpeq p[[P1:[0-9]+]].s, p[[P0]]/z, [[Z0:z[0-9]+]].s, [[Z1:z[0-9]+]].s +; VBITS_GE_512-DAG: uzp1 [[Z1]].h, [[Z1]].h, [[Z1]].h +; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].h, p{{[0-9]+}}/z, [[Z1]].h, #0 +; VBITS_GE_512-DAG: uzp1 [[Z0]].h, [[Z0]].h, [[Z0]].h +; VBITS_GE_512: st1h { [[Z0]].h }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ret + %a = load <16 x i32>, <16 x i32>* %ap + %b = load <16 x i32>, <16 x i32>* %bp + %mask = icmp eq <16 x i32> %a, %b + %val = trunc <16 x i32> %a to <16 x i16> + call void @llvm.masked.store.v16i16(<16 x i16> %val, <16 x i16>* %dest, i32 8, <16 x i1> %mask) + ret void +} + +define void @masked_store_trunc_v32i16i8(<32 x i16>* %ap, <32 x i16>* %bp, <32 x i8>* %dest) #0 { +; CHECK-LABEL: masked_store_trunc_v32i16i8: +; VBITS_GE_512: ptrue p[[P0:[0-9]+]].h, vl32 +; VBITS_GE_512: cmpeq p[[P1:[0-9]+]].h, p[[P0]]/z, [[Z0:z[0-9]+]].h, [[Z1:z[0-9]+]].h +; VBITS_GE_512-DAG: uzp1 [[Z1]].b, [[Z1]].b, [[Z1]].b +; VBITS_GE_512-DAG: cmpne p[[P2:[0-9]+]].b, p{{[0-9]+}}/z, [[Z1]].b, #0 +; VBITS_GE_512-DAG: uzp1 [[Z0]].b, [[Z0]].b, [[Z0]].b +; VBITS_GE_512: st1b { [[Z0]].b }, p[[P2]], [x{{[0-9]+}}] +; VBITS_GE_512-NEXT: ret + %a = load <32 x i16>, <32 x i16>* %ap + %b = load <32 x i16>, <32 x i16>* %bp + %mask = icmp eq <32 x i16> %a, %b + %val = trunc <32 x i16> %a to <32 x i8> + call void @llvm.masked.store.v32i8(<32 x i8> %val, <32 x i8>* %dest, i32 8, <32 x i1> %mask) + ret void +} + +declare void @llvm.masked.store.v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>) +declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>) +declare void @llvm.masked.store.v8f32(<8 x float>, <8 x float>*, i32, <8 x i1>) +declare void @llvm.masked.store.v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>) +declare void @llvm.masked.store.v32f32(<32 x float>, <32 x float>*, i32, <32 x i1>) +declare void @llvm.masked.store.v64f32(<64 x float>, <64 x float>*, i32, <64 x i1>) + +declare void @llvm.masked.store.v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>) +declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>) +declare void @llvm.masked.store.v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>) +declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>) +declare void @llvm.masked.store.v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>) +declare void @llvm.masked.store.v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>) + +attributes #0 = { "target-features"="+sve" }