Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -309,6 +309,34 @@
   return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
 }]>;
 
+// non-truncating masked store fragment.
+def nontrunc_masked_store :
+  PatFrag<(ops node:$val, node:$ptr, node:$pred),
+          (masked_st node:$val, node:$ptr, node:$pred), [{
+  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore();
+}]>;
+// truncating masked store fragments.
+def trunc_masked_store :
+  PatFrag<(ops node:$val, node:$ptr, node:$pred),
+          (masked_st node:$val, node:$ptr, node:$pred), [{
+  return cast<MaskedStoreSDNode>(N)->isTruncatingStore();
+}]>;
+def trunc_masked_store_i8 :
+  PatFrag<(ops node:$val, node:$ptr, node:$pred),
+          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
+  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+def trunc_masked_store_i16 :
+  PatFrag<(ops node:$val, node:$ptr, node:$pred),
+          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
+  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+def trunc_masked_store_i32 :
+  PatFrag<(ops node:$val, node:$ptr, node:$pred),
+          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
+  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+
 // Node definitions.
 def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
 def AArch64adr  : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1108,6 +1108,36 @@
   // 16-element contiguous loads
   defm : pred_load<nxv16i8, nxv16i1, nonext_masked_load, LD1B_IMM>;
 
+  multiclass pred_store<ValueType Ty, ValueType PredTy, SDPatternOperator Store,
+                        Instruction RegImmInst> {
+    def _default : Pat<(Store (Ty ZPR:$vec), GPR64:$base, (PredTy PPR:$gp)),
+                       (RegImmInst ZPR:$vec, PPR:$gp, GPR64:$base, (i64 0))>;
+  }
+
+  // 2-element contiguous stores
+  defm : pred_store<nxv2i64, nxv2i1, trunc_masked_store_i8,  ST1B_D_IMM>;
+  defm : pred_store<nxv2i64, nxv2i1, trunc_masked_store_i16, ST1H_D_IMM>;
+  defm : pred_store<nxv2i64, nxv2i1, trunc_masked_store_i32, ST1W_D_IMM>;
+  defm : pred_store<nxv2i64, nxv2i1, nontrunc_masked_store,  ST1D_IMM>;
+  defm : pred_store<nxv2f16, nxv2i1, nontrunc_masked_store,  ST1H_D_IMM>;
+  defm : pred_store<nxv2f32, nxv2i1, nontrunc_masked_store,  ST1W_D_IMM>;
+  defm : pred_store<nxv2f64, nxv2i1, nontrunc_masked_store,  ST1D_IMM>;
+
+  // 4-element contiguous stores
+  defm : pred_store<nxv4i32, nxv4i1, trunc_masked_store_i8,  ST1B_S_IMM>;
+  defm : pred_store<nxv4i32, nxv4i1, trunc_masked_store_i16, ST1H_S_IMM>;
+  defm : pred_store<nxv4i32, nxv4i1, nontrunc_masked_store,  ST1W_IMM>;
+  defm : pred_store<nxv4f16, nxv4i1, nontrunc_masked_store,  ST1H_S_IMM>;
+  defm : pred_store<nxv4f32, nxv4i1, nontrunc_masked_store,  ST1W_IMM>;
+
+  // 8-element contiguous stores
+  defm : pred_store<nxv8i16, nxv8i1, trunc_masked_store_i8, ST1B_H_IMM>;
+  defm : pred_store<nxv8i16, nxv8i1, nontrunc_masked_store, ST1H_IMM>;
+  defm : pred_store<nxv8f16, nxv8i1, nontrunc_masked_store, ST1H_IMM>;
+
+  // 16-element contiguous stores
+  defm : pred_store<nxv16i8, nxv16i1, nontrunc_masked_store, ST1B_IMM>;
 }
 
 let Predicates = [HasSVE2] in {
Index: llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -147,7 +147,7 @@
 
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
 
-  bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedLoadStore(Type *DataType, MaybeAlign Alignment) {
     if (!isa<VectorType>(DataType) || !ST->hasSVE())
       return false;
 
@@ -162,6 +162,14 @@
     return false;
   }
 
+  bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) {
+    return isLegalMaskedLoadStore(DataType, Alignment);
+  }
+
+  bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
+    return isLegalMaskedLoadStore(DataType, Alignment);
+  }
+
   int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                  ArrayRef<unsigned> Indices, unsigned Alignment,
                                  unsigned AddressSpace,
Index: llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
+++ llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
@@ -74,6 +74,80 @@
   ret <vscale x 8 x half> %load
 }
 
+;
+; Masked Stores
+;
+
+define void @masked_store_nxv2i64(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv2i64:
+; CHECK: st1d { [[IN]].d }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64> *%a, i32 8, <vscale x 2 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv4i32(<vscale x 4 x i32> *%a, <vscale x 4 x i32> %val, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv4i32:
+; CHECK: st1w { [[IN]].s }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> *%a, i32 4, <vscale x 4 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv8i16(<vscale x 8 x i16> *%a, <vscale x 8 x i16> %val, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv8i16:
+; CHECK: st1h { [[IN]].h }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> *%a, i32 2, <vscale x 8 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv16i8(<vscale x 16 x i8> *%a, <vscale x 16 x i8> %val, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv16i8:
+; CHECK: st1b { [[IN]].b }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> *%a, i32 1, <vscale x 16 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv2f64(<vscale x 2 x double> *%a, <vscale x 2 x double> %val, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv2f64:
+; CHECK: st1d { [[IN]].d }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> *%a, i32 8, <vscale x 2 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv2f32(<vscale x 2 x float> *%a, <vscale x 2 x float> %val, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv2f32:
+; CHECK: st1w { [[IN]].d }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> *%a, i32 4, <vscale x 2 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv2f16(<vscale x 2 x half> *%a, <vscale x 2 x half> %val, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv2f16:
+; CHECK: st1h { [[IN]].d }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> *%a, i32 4, <vscale x 2 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv4f32(<vscale x 4 x float> *%a, <vscale x 4 x float> %val, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv4f32:
+; CHECK: st1w { [[IN]].s }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> *%a, i32 4, <vscale x 4 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv4f16(<vscale x 4 x half> *%a, <vscale x 4 x half> %val, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv4f16:
+; CHECK: st1h { [[IN]].s }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> *%a, i32 2, <vscale x 4 x i1> %mask)
+  ret void
+}
+
+define void @masked_store_nxv8f16(<vscale x 8 x half> *%a, <vscale x 8 x half> %val, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_store_nxv8f16:
+; CHECK: st1h { [[IN]].h }, [[PG]], [x0]
+  call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> *%a, i32 2, <vscale x 8 x i1> %mask)
+  ret void
+}
+
 declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
@@ -85,3 +159,15 @@
 declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
 declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+
+declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
+
+declare void @llvm.masked.store.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
Index: llvm/test/CodeGen/AArch64/sve-masked-ldst-trunc.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-masked-ldst-trunc.ll
@@ -0,0 +1,76 @@
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; Masked Stores
+;
+
+define void @masked_trunc_store_nxv2i8(<vscale x 2 x i64> *%a, <vscale x 2 x i8> *%b, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_trunc_store_nxv2i8:
+; CHECK: ld1d { [[IN:z[0-9]]].d }, [[PG:p[0-9]]]/z, [x0]
+; CHECK: st1b { [[IN]].d }, [[PG]], [x1]
+  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64> *%a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %trunc = trunc <vscale x 2 x i64> %load to <vscale x 2 x i8>
+  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i8> *%b, i32 8, <vscale x 2 x i1> %mask)
+  ret void
+}
+
+define void @masked_trunc_store_nxv2i16(<vscale x 2 x i64> *%a, <vscale x 2 x i16> *%b, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_trunc_store_nxv2i16:
+; CHECK: ld1d { [[IN:z[0-9]]].d }, [[PG:p[0-9]]]/z, [x0]
+; CHECK: st1h { [[IN]].d }, [[PG]], [x1]
+  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64> *%a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %trunc = trunc <vscale x 2 x i64> %load to <vscale x 2 x i16>
+  call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i16> *%b, i32 8, <vscale x 2 x i1> %mask)
+  ret void
+}
+
+define void @masked_trunc_store_nxv2i32(<vscale x 2 x i64> *%a, <vscale x 2 x i32> *%b, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: masked_trunc_store_nxv2i32:
+; CHECK: ld1d { [[IN:z[0-9]]].d }, [[PG:p[0-9]]]/z, [x0]
+; CHECK: st1w { [[IN]].d }, [[PG]], [x1]
+  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64> *%a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %trunc = trunc <vscale x 2 x i64> %load to <vscale x 2 x i32>
+  call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i32> *%b, i32 8, <vscale x 2 x i1> %mask)
+  ret void
+}
+
+define void @masked_trunc_store_nxv4i8(<vscale x 4 x i32> *%a, <vscale x 4 x i8> *%b, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_trunc_store_nxv4i8:
+; CHECK: ld1w { [[IN:z[0-9]]].s }, [[PG:p[0-9]]]/z, [x0]
+; CHECK: st1b { [[IN]].s }, [[PG]], [x1]
+  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32> *%a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %trunc = trunc <vscale x 4 x i32> %load to <vscale x 4 x i8>
+  call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i8> *%b, i32 4, <vscale x 4 x i1> %mask)
+  ret void
+}
+
+define void @masked_trunc_store_nxv4i16(<vscale x 4 x i32> *%a, <vscale x 4 x i16> *%b, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: masked_trunc_store_nxv4i16:
+; CHECK: ld1w { [[IN:z[0-9]]].s }, [[PG:p[0-9]]]/z, [x0]
+; CHECK: st1h { [[IN]].s }, [[PG]], [x1]
+  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32> *%a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %trunc = trunc <vscale x 4 x i32> %load to <vscale x 4 x i16>
+  call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i16> *%b, i32 4, <vscale x 4 x i1> %mask)
+  ret void
+}
+
+define void @masked_trunc_store_nxv8i8(<vscale x 8 x i16> *%a, <vscale x 8 x i8> *%b, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: masked_trunc_store_nxv8i8:
+; CHECK: ld1h { [[IN:z[0-9]]].h }, [[PG:p[0-9]]]/z, [x0]
+; CHECK: st1b { [[IN]].h }, [[PG]], [x1]
+  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16> *%a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %trunc = trunc <vscale x 8 x i16> %load to <vscale x 8 x i8>
+  call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i8> *%b, i32 2, <vscale x 8 x i1> %mask)
+  ret void
+}
+
+declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+
+declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)