diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4646,9 +4646,13 @@
   // For custom ISD nodes, we have to look at them individually to extract the
   // type of the data moved to/from memory.
   switch (Opcode) {
+  case AArch64ISD::LD1:
+  case AArch64ISD::LD1S:
   case AArch64ISD::LDNF1:
   case AArch64ISD::LDNF1S:
     return cast<VTSDNode>(Root->getOperand(3))->getVT();
+  case AArch64ISD::ST1:
+    return cast<VTSDNode>(Root->getOperand(4))->getVT();
   default:
     break;
   }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -226,6 +226,8 @@
   REINTERPRET_CAST,
 
+  LD1,
+  LD1S,
   LDNF1,
   LDNF1S,
   LDFF1,
@@ -272,6 +274,8 @@
   GLDNT1_INDEX,
   GLDNT1S,
 
+  ST1,
+
   // Scatter store
   SST1,
   SST1_SCALED,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1415,6 +1415,8 @@
   case AArch64ISD::INSR:            return "AArch64ISD::INSR";
   case AArch64ISD::PTEST:           return "AArch64ISD::PTEST";
   case AArch64ISD::PTRUE:           return "AArch64ISD::PTRUE";
+  case AArch64ISD::LD1:             return "AArch64ISD::LD1";
+  case AArch64ISD::LD1S:            return "AArch64ISD::LD1S";
   case AArch64ISD::LDNF1:           return "AArch64ISD::LDNF1";
   case AArch64ISD::LDNF1S:          return "AArch64ISD::LDNF1S";
   case AArch64ISD::LDFF1:           return "AArch64ISD::LDFF1";
@@ -1454,6 +1456,8 @@
   case AArch64ISD::GLDNT1_INDEX:    return "AArch64ISD::GLDNT1_INDEX";
   case AArch64ISD::GLDNT1S:         return "AArch64ISD::GLDNT1S";
 
+  case AArch64ISD::ST1:             return "AArch64ISD::ST1";
+
   case AArch64ISD::SST1:            return "AArch64ISD::SST1";
   case AArch64ISD::SST1_SCALED:     return "AArch64ISD::SST1_SCALED";
   case AArch64ISD::SST1_SXTW:       return "AArch64ISD::SST1_SXTW";
@@ -9041,7 +9045,6 @@
     Info.align = Align(16);
     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
     return true;
-  case Intrinsic::aarch64_sve_ld1:
   case Intrinsic::aarch64_sve_ldnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -9054,7 +9057,6 @@
     Info.flags |= MachineMemOperand::MONonTemporal;
     return true;
   }
-  case Intrinsic::aarch64_sve_st1:
   case Intrinsic::aarch64_sve_stnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(2)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -10515,6 +10517,7 @@
   // SVE load instructions perform an implicit zero-extend, which makes them
   // perfect candidates for combining.
   switch (Src->getOpcode()) {
+  case AArch64ISD::LD1:
   case AArch64ISD::LDNF1:
   case AArch64ISD::LDFF1:
     MemVT = cast<VTSDNode>(Src->getOperand(3))->getVT();
@@ -11581,7 +11584,33 @@
   }
 }
 
-static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
+  SDLoc DL(N);
+  EVT VT = N->getValueType(0);
+
+  if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
+    return SDValue();
+
+  EVT ContainerVT = VT;
+  if (ContainerVT.isInteger())
+    ContainerVT = getSVEContainerType(ContainerVT);
+
+  SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
+  SDValue Ops[] = { N->getOperand(0), // Chain
+                    N->getOperand(2), // Pg
+                    N->getOperand(3), // Base
+                    DAG.getValueType(VT) };
+
+  SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
+  SDValue LoadChain = SDValue(Load.getNode(), 1);
+
+  if (ContainerVT.isInteger() && (VT != ContainerVT))
+    Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
+
+  return DAG.getMergeValues({ Load, LoadChain }, DL);
+}
+
+static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
   EVT PtrTy = N->getOperand(3).getValueType();
@@ -11608,6 +11637,32 @@
 
 static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
+  SDValue Data = N->getOperand(2);
+  EVT DataVT = Data.getValueType();
+  EVT HwSrcVt = getSVEContainerType(DataVT);
+  SDValue InputVT = DAG.getValueType(DataVT);
+
+  if (DataVT.isFloatingPoint())
+    InputVT = DAG.getValueType(HwSrcVt);
+
+  SDValue SrcNew;
+  if (Data.getValueType().isFloatingPoint())
+    SrcNew = DAG.getNode(ISD::BITCAST, DL, HwSrcVt, Data);
+  else
+    SrcNew = DAG.getNode(ISD::ANY_EXTEND, DL, HwSrcVt, Data);
+
+  SDValue Ops[] = { N->getOperand(0), // Chain
+                    SrcNew,
+                    N->getOperand(4), // Base
+                    N->getOperand(3), // Pg
+                    InputVT
+                  };
+
+  return DAG.getNode(AArch64ISD::ST1, DL, N->getValueType(0), Ops);
+}
+
+static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
+  SDLoc DL(N);
   SDValue Data = N->getOperand(2);
   EVT DataVT = Data.getValueType();
@@ -11623,32 +11678,6 @@
                       ISD::UNINDEXED, false, false);
 }
 
-static SDValue performLDNF1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {
-  SDLoc DL(N);
-  EVT VT = N->getValueType(0);
-
-  if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock)
-    return SDValue();
-
-  EVT ContainerVT = VT;
-  if (ContainerVT.isInteger())
-    ContainerVT = getSVEContainerType(ContainerVT);
-
-  SDVTList VTs = DAG.getVTList(ContainerVT, MVT::Other);
-  SDValue Ops[] = { N->getOperand(0), // Chain
-                    N->getOperand(2), // Pg
-                    N->getOperand(3), // Base
-                    DAG.getValueType(VT) };
-
-  SDValue Load = DAG.getNode(Opc, DL, VTs, Ops);
-  SDValue LoadChain = SDValue(Load.getNode(), 1);
-
-  if (ContainerVT.isInteger() && (VT != ContainerVT))
-    Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0));
-
-  return DAG.getMergeValues({ Load, LoadChain }, DL);
-}
-
 /// Replace a splat of zeros to a vector store by scalar stores of WZR/XZR. The
 /// load store optimizer pass will merge them to store pair stores.
This should /// be better than a movi to create the vector zero followed by a vector store @@ -12963,6 +12992,10 @@ unsigned NewOpc; unsigned MemVTOpNum = 4; switch (Opc) { + case AArch64ISD::LD1: + NewOpc = AArch64ISD::LD1S; + MemVTOpNum = 3; + break; case AArch64ISD::LDNF1: NewOpc = AArch64ISD::LDNF1S; MemVTOpNum = 3; @@ -13189,9 +13222,8 @@ case Intrinsic::aarch64_neon_st3lane: case Intrinsic::aarch64_neon_st4lane: return performNEONPostLDSTCombine(N, DCI, DAG); - case Intrinsic::aarch64_sve_ld1: case Intrinsic::aarch64_sve_ldnt1: - return performLD1Combine(N, DAG); + return performLDNT1Combine(N, DAG); case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1); case Intrinsic::aarch64_sve_ldnt1_gather: @@ -13200,13 +13232,16 @@ return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1_INDEX); case Intrinsic::aarch64_sve_ldnt1_gather_uxtw: return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1); + case Intrinsic::aarch64_sve_ld1: + return performLD1Combine(N, DAG, AArch64ISD::LD1); case Intrinsic::aarch64_sve_ldnf1: - return performLDNF1Combine(N, DAG, AArch64ISD::LDNF1); + return performLD1Combine(N, DAG, AArch64ISD::LDNF1); case Intrinsic::aarch64_sve_ldff1: - return performLDNF1Combine(N, DAG, AArch64ISD::LDFF1); + return performLD1Combine(N, DAG, AArch64ISD::LDFF1); case Intrinsic::aarch64_sve_st1: - case Intrinsic::aarch64_sve_stnt1: return performST1Combine(N, DAG); + case Intrinsic::aarch64_sve_stnt1: + return performSTNT1Combine(N, DAG); case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset: return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1); case Intrinsic::aarch64_sve_stnt1_scatter_uxtw: diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -13,18 +13,23 @@ def SVE8BitLslImm : ComplexPattern; def SVELShiftImm64 : ComplexPattern", []>; -// Non-faulting & first-faulting loads - node definitions +// Contiguous loads - node definitions // -def SDT_AArch64_LDNF1 : SDTypeProfile<1, 3, [ +def SDT_AArch64_LD1 : SDTypeProfile<1, 3, [ SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>, SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1> ]>; -def AArch64ldnf1 : SDNode<"AArch64ISD::LDNF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>; -def AArch64ldff1 : SDNode<"AArch64ISD::LDFF1", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>; +def AArch64ld1 : SDNode<"AArch64ISD::LD1", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>; +def AArch64ld1s : SDNode<"AArch64ISD::LD1S", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>; + +// Non-faulting & first-faulting loads - node definitions +// +def AArch64ldnf1 : SDNode<"AArch64ISD::LDNF1", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>; +def AArch64ldff1 : SDNode<"AArch64ISD::LDFF1", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>; -def AArch64ldnf1s : SDNode<"AArch64ISD::LDNF1S", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>; -def AArch64ldff1s : SDNode<"AArch64ISD::LDFF1S", SDT_AArch64_LDNF1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>; +def AArch64ldnf1s : SDNode<"AArch64ISD::LDNF1S", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>; +def AArch64ldff1s : SDNode<"AArch64ISD::LDFF1S", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, 
SDNPOptInGlue, SDNPOutGlue]>; // Gather loads - node definitions // @@ -73,6 +78,15 @@ def AArch64ldnt1_gather : SDNode<"AArch64ISD::GLDNT1", SDT_AArch64_GATHER_VS, [SDNPHasChain, SDNPMayLoad]>; def AArch64ldnt1s_gather : SDNode<"AArch64ISD::GLDNT1S", SDT_AArch64_GATHER_VS, [SDNPHasChain, SDNPMayLoad]>; +// Contiguous stores - node definitions +// +def SDT_AArch64_ST1 : SDTypeProfile<0, 4, [ + SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisVec<2>, + SDTCVecEltisVT<2,i1>, SDTCisSameNumEltsAs<0,2> +]>; + +def AArch64st1 : SDNode<"AArch64ISD::ST1", SDT_AArch64_ST1, [SDNPHasChain, SDNPMayStore]>; + // Scatter stores - node definitions // def SDT_AArch64_SCATTER_SV : SDTypeProfile<0, 5, [ @@ -1554,7 +1568,7 @@ defm Pat_Load_P4 : unpred_load_predicate; defm Pat_Load_P2 : unpred_load_predicate; - multiclass ldnf1 { + multiclass ld1 { // scalar + immediate (mul vl) let AddedComplexity = 1 in { def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)), @@ -1566,32 +1580,60 @@ (I PPR:$gp, GPR64sp:$base, (i64 0))>; } + // 2-element contiguous loads + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + + // 4-element contiguous loads + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + + // 8-element contiguous loads + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + + // 16-element contiguous loads + defm : ld1; + + // 2-element contiguous non-faulting loads - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; // 4-element contiguous non-faulting loads - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; // 8-element contiguous non-faulting loads - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; - defm : ldnf1; + defm : ld1; + defm : ld1; + defm : ld1; + defm : ld1; // 16-element contiguous non-faulting loads - defm : ldnf1; + defm : ld1; multiclass ldff1 { // reg + reg @@ -1632,6 +1674,37 @@ // 16-element contiguous first faulting loads defm : ldff1; + + multiclass st1 { + // scalar + immediate (mul vl) + let AddedComplexity = 1 in { + def : Pat<(Store (Ty ZPR:$vec), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), (PredTy PPR:$gp), MemVT), + (I ZPR:$vec, PPR:$gp, GPR64sp:$base, simm4s1:$offset)>; + } + + // base + def : Pat<(Store (Ty ZPR:$vec), GPR64:$base, (PredTy PPR:$gp), MemVT), + (I ZPR:$vec, PPR:$gp, GPR64:$base, (i64 0))>; + } + + // 2-element contiguous store + defm : st1; + defm : st1; + defm : st1; + defm : st1; + + // 4-element contiguous store + defm : st1; + defm : st1; + defm : st1; + + // 8-element contiguous store + defm : st1; + defm : st1; + + // 16-element contiguous store + defm : st1; + } let Predicates = [HasSVE2] in { diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll @@ -0,0 +1,469 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +; +; LD1B +; + +define @ld1b_i8( %pred, i8* %addr) { +; CHECK-LABEL: ld1b_i8: +; CHECK: ld1b { z0.b }, p0/z, [x0] +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.ld1.nxv16i8( %pred, i8* %addr) + ret %res +} + +define @ld1b_h( %pred, i8* 
%addr) { +; CHECK-LABEL: ld1b_h: +; CHECK: ld1b { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pred, i8* %addr) + %res = zext %load to + ret %res +} + +define @ld1sb_h( %pred, i8* %addr) { +; CHECK-LABEL: ld1sb_h: +; CHECK: ld1sb { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pred, i8* %addr) + %res = sext %load to + ret %res +} + +define @ld1b_s( %pred, i8* %addr) { +; CHECK-LABEL: ld1b_s: +; CHECK: ld1b { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pred, i8* %addr) + %res = zext %load to + ret %res +} + +define @ld1sb_s( %pred, i8* %addr) { +; CHECK-LABEL: ld1sb_s: +; CHECK: ld1sb { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pred, i8* %addr) + %res = sext %load to + ret %res +} + +define @ld1b_d( %pred, i8* %addr) { +; CHECK-LABEL: ld1b_d: +; CHECK: ld1b { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pred, i8* %addr) + %res = zext %load to + ret %res +} + +define @ld1sb_d( %pred, i8* %addr) { +; CHECK-LABEL: ld1sb_d: +; CHECK: ld1sb { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pred, i8* %addr) + %res = sext %load to + ret %res +} + +define @ld1b_upper_bound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_upper_bound: +; CHECK: ld1b { z0.b }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) + ret %load +} + +define @ld1b_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_inbound: +; CHECK: ld1b { z0.b }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) + ret %load +} + +define @ld1b_s_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_s_inbound: +; CHECK: ld1b { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pg, i8* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sb_s_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1sb_s_inbound: +; CHECK: ld1sb { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv4i8( %pg, i8* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1b_lower_bound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_lower_bound: +; CHECK: ld1b { z0.b }, p0/z, [x0, #-8, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -8 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) + ret %load +} + +define @ld1b_out_of_upper_bound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_out_of_upper_bound: +; CHECK: rdvl x[[OFFSET:[0-9]+]], #8 +; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]] +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x[[BASE]]] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 8 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* 
%base_scalar) + ret %load +} + +define @ld1b_out_of_lower_bound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_out_of_lower_bound: +; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9 +; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]] +; CHECK-NEXT: ld1b { z0.b }, p0/z, [x[[BASE]]] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -9 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv16i8( %pg, i8* %base_scalar) + ret %load +} + +; +; LD1H +; + +define @ld1h_i16( %pred, i16* %addr) { +; CHECK-LABEL: ld1h_i16: +; CHECK: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.ld1.nxv8i16( %pred, i16* %addr) + ret %res +} + +define @ld1h_f16( %pred, half* %addr) { +; CHECK-LABEL: ld1h_f16: +; CHECK: ld1h { z0.h }, p0/z, [x0] +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.ld1.nxv8f16( %pred, half* %addr) + ret %res +} + +define @ld1h_s( %pred, i16* %addr) { +; CHECK-LABEL: ld1h_s: +; CHECK: ld1h { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pred, i16* %addr) + %res = zext %load to + ret %res +} + +define @ld1sh_s( %pred, i16* %addr) { +; CHECK-LABEL: ld1sh_s: +; CHECK: ld1sh { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pred, i16* %addr) + %res = sext %load to + ret %res +} + +define @ld1h_d( %pred, i16* %addr) { +; CHECK-LABEL: ld1h_d: +; CHECK: ld1h { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pred, i16* %addr) + %res = zext %load to + ret %res +} + +define @ld1sh_d( %pred, i16* %addr) { +; CHECK-LABEL: ld1sh_d: +; CHECK: ld1sh { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pred, i16* %addr) + %res = sext %load to + ret %res +} + +define @ld1b_h_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_h_inbound: +; CHECK: ld1b { z0.h }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pg, i8* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sb_h_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1sb_h_inbound: +; CHECK: ld1sb { z0.h }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv8i8( %pg, i8* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1h_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1h_inbound: +; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv8i16( %pg, i16* %base_scalar) + ret %load +} + +define @ld1h_s_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1h_s_inbound: +; CHECK: ld1h { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pg, i16* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sh_s_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1sh_s_inbound: +; CHECK: ld1sh { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * 
%base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv4i16( %pg, i16* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1b_d_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1b_d_inbound: +; CHECK: ld1b { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pg, i8* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sb_d_inbound( %pg, i8* %a) { +; CHECK-LABEL: ld1sb_d_inbound: +; CHECK: ld1sb { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %load = call @llvm.aarch64.sve.ld1.nxv2i8( %pg, i8* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1h_d_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1h_d_inbound: +; CHECK: ld1h { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pg, i16* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sh_d_inbound( %pg, i16* %a) { +; CHECK-LABEL: ld1sh_d_inbound: +; CHECK: ld1sh { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i16* + %load = call @llvm.aarch64.sve.ld1.nxv2i16( %pg, i16* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1h_f16_inbound( %pg, half* %a) { +; CHECK-LABEL: ld1h_f16_inbound: +; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast half* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to half* + %load = call @llvm.aarch64.sve.ld1.nxv8f16( %pg, half* %base_scalar) + ret %load +} + +; +; LD1W +; + +define @ld1w_i32( %pred, i32* %addr) { +; CHECK-LABEL: ld1w_i32: +; CHECK: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.ld1.nxv4i32( %pred, i32* %addr) + ret %res +} + +define @ld1w_f32( %pred, float* %addr) { +; CHECK-LABEL: ld1w_f32: +; CHECK: ld1w { z0.s }, p0/z, [x0] +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.ld1.nxv4f32( %pred, float* %addr) + ret %res +} + +define @ld1w_d( %pred, i32* %addr) { +; CHECK-LABEL: ld1w_d: +; CHECK: ld1w { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pred, i32* %addr) + %res = zext %load to + ret %res +} + +define @ld1sw_d( %pred, i32* %addr) { +; CHECK-LABEL: ld1sw_d: +; CHECK: ld1sw { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pred, i32* %addr) + %res = sext %load to + ret %res +} + +define @ld1w_inbound( %pg, i32* %a) { +; CHECK-LABEL: ld1w_inbound: +; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i32* + %load = call @llvm.aarch64.sve.ld1.nxv4i32( %pg, i32* %base_scalar) + ret %load +} + +define @ld1w_f32_inbound( %pg, float* %a) { +; CHECK-LABEL: ld1w_f32_inbound: +; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast float* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to float* + %load = call @llvm.aarch64.sve.ld1.nxv4f32( 
%pg, float* %base_scalar) + ret %load +} + +; +; LD1D +; + +define @ld1d_i64( %pred, i64* %addr) { +; CHECK-LABEL: ld1d_i64: +; CHECK: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.ld1.nxv2i64( %pred, + i64* %addr) + ret %res +} + +define @ld1d_f64( %pred, double* %addr) { +; CHECK-LABEL: ld1d_f64: +; CHECK: ld1d { z0.d }, p0/z, [x0] +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.ld1.nxv2f64( %pred, + double* %addr) + ret %res +} + +define @ld1d_inbound( %pg, i64* %a) { +; CHECK-LABEL: ld1d_inbound: +; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i64* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i64* + %load = call @llvm.aarch64.sve.ld1.nxv2i64( %pg, i64* %base_scalar) + ret %load +} + +define @ld1w_d_inbound( %pg, i32* %a) { +; CHECK-LABEL: ld1w_d_inbound: +; CHECK: ld1w { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i32* + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pg, i32* %base_scalar) + %res = zext %load to + ret %res +} + +define @ld1sw_d_inbound( %pg, i32* %a) { +; CHECK-LABEL: ld1sw_d_inbound: +; CHECK: ld1sw { z0.d }, p0/z, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i32* + %load = call @llvm.aarch64.sve.ld1.nxv2i32( %pg, i32* %base_scalar) + %res = sext %load to + ret %res +} + +define @ld1d_f64_inbound( %pg, double* %a) { +; CHECK-LABEL: ld1d_f64_inbound: +; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast double* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to double* + %load = call @llvm.aarch64.sve.ld1.nxv2f64( %pg, double* %base_scalar) + ret %load +} + +declare @llvm.aarch64.sve.ld1.nxv16i8(, i8*) + +declare @llvm.aarch64.sve.ld1.nxv8i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv8i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv8f16(, half*) + +declare @llvm.aarch64.sve.ld1.nxv4i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv4i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv4i32(, i32*) +declare @llvm.aarch64.sve.ld1.nxv4f32(, float*) + +declare @llvm.aarch64.sve.ld1.nxv2i8(, i8*) +declare @llvm.aarch64.sve.ld1.nxv2i16(, i16*) +declare @llvm.aarch64.sve.ld1.nxv2i32(, i32*) +declare @llvm.aarch64.sve.ld1.nxv2i64(, i64*) +declare @llvm.aarch64.sve.ld1.nxv2f64(, double*) diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst1.ll deleted file mode 100644 --- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst1.ll +++ /dev/null @@ -1,182 +0,0 @@ -; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s - -; -; LD1B -; - -define @ld1b_i8( %pred, i8* %addr) { -; CHECK-LABEL: ld1b_i8: -; CHECK: ld1b { z0.b }, p0/z, [x0] -; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld1.nxv16i8( %pred, - i8* %addr) - ret %res -} - -; -; LD1H -; - -define @ld1h_i16( %pred, i16* %addr) { -; CHECK-LABEL: ld1h_i16: -; CHECK: ld1h { z0.h }, p0/z, [x0] -; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld1.nxv8i16( %pred, - i16* %addr) - ret %res -} - -define @ld1h_f16( %pred, half* %addr) { -; CHECK-LABEL: ld1h_f16: -; CHECK: ld1h { z0.h }, p0/z, [x0] -; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld1.nxv8f16( %pred, - half* %addr) - ret %res -} - -; -; LD1W -; - -define @ld1w_i32( 
%pred, i32* %addr) { -; CHECK-LABEL: ld1w_i32: -; CHECK: ld1w { z0.s }, p0/z, [x0] -; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld1.nxv4i32( %pred, - i32* %addr) - ret %res -} - -define @ld1w_f32( %pred, float* %addr) { -; CHECK-LABEL: ld1w_f32: -; CHECK: ld1w { z0.s }, p0/z, [x0] -; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld1.nxv4f32( %pred, - float* %addr) - ret %res -} - -; -; LD1D -; - -define @ld1d_i64( %pred, i64* %addr) { -; CHECK-LABEL: ld1d_i64: -; CHECK: ld1d { z0.d }, p0/z, [x0] -; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld1.nxv2i64( %pred, - i64* %addr) - ret %res -} - -define @ld1d_f64( %pred, double* %addr) { -; CHECK-LABEL: ld1d_f64: -; CHECK: ld1d { z0.d }, p0/z, [x0] -; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.ld1.nxv2f64( %pred, - double* %addr) - ret %res -} - -; -; ST1B -; - -define void @st1b_i8( %data, %pred, i8* %addr) { -; CHECK-LABEL: st1b_i8: -; CHECK: st1b { z0.b }, p0, [x0] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.st1.nxv16i8( %data, - %pred, - i8* %addr) - ret void -} - -; -; ST1H -; - -define void @st1h_i16( %data, %pred, i16* %addr) { -; CHECK-LABEL: st1h_i16: -; CHECK: st1h { z0.h }, p0, [x0] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.st1.nxv8i16( %data, - %pred, - i16* %addr) - ret void -} - -define void @st1h_f16( %data, %pred, half* %addr) { -; CHECK-LABEL: st1h_f16: -; CHECK: st1h { z0.h }, p0, [x0] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.st1.nxv8f16( %data, - %pred, - half* %addr) - ret void -} - -; -; ST1W -; - -define void @st1w_i32( %data, %pred, i32* %addr) { -; CHECK-LABEL: st1w_i32: -; CHECK: st1w { z0.s }, p0, [x0] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.st1.nxv4i32( %data, - %pred, - i32* %addr) - ret void -} - -define void @st1w_f32( %data, %pred, float* %addr) { -; CHECK-LABEL: st1w_f32: -; CHECK: st1w { z0.s }, p0, [x0] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.st1.nxv4f32( %data, - %pred, - float* %addr) - ret void -} - -; -; ST1D -; - -define void @st1d_i64( %data, %pred, i64* %addr) { -; CHECK-LABEL: st1d_i64: -; CHECK: st1d { z0.d }, p0, [x0] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.st1.nxv2i64( %data, - %pred, - i64* %addr) - ret void -} - -define void @st1d_f64( %data, %pred, double* %addr) { -; CHECK-LABEL: st1d_f64: -; CHECK: st1d { z0.d }, p0, [x0] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.st1.nxv2f64( %data, - %pred, - double* %addr) - ret void -} - -declare @llvm.aarch64.sve.ld1.nxv16i8(, i8*) -declare @llvm.aarch64.sve.ld1.nxv8i16(, i16*) -declare @llvm.aarch64.sve.ld1.nxv4i32(, i32*) -declare @llvm.aarch64.sve.ld1.nxv2i64(, i64*) -declare @llvm.aarch64.sve.ld1.nxv8f16(, half*) -declare @llvm.aarch64.sve.ld1.nxv4f32(, float*) -declare @llvm.aarch64.sve.ld1.nxv2f64(, double*) - -declare void @llvm.aarch64.sve.st1.nxv16i8(, , i8*) -declare void @llvm.aarch64.sve.st1.nxv8i16(, , i16*) -declare void @llvm.aarch64.sve.st1.nxv4i32(, , i32*) -declare void @llvm.aarch64.sve.st1.nxv2i64(, , i64*) -declare void @llvm.aarch64.sve.st1.nxv8f16(, , half*) -declare void @llvm.aarch64.sve.st1.nxv4f32(, , float*) -declare void @llvm.aarch64.sve.st1.nxv2f64(, , double*) diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll @@ -0,0 +1,367 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +; +; ST1B +; + +define void @st1b_i8( %data, %pred, i8* %addr) { +; CHECK-LABEL: st1b_i8: +; CHECK: st1b { z0.b 
}, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.st1.nxv16i8( %data, + %pred, + i8* %addr) + ret void +} + +define void @st1b_h( %data, %pred, i8* %addr) { +; CHECK-LABEL: st1b_h: +; CHECK: st1b { z0.h }, p0, [x0] +; CHECK-NEXT: ret + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv8i8( %trunc, + %pred, + i8* %addr) + ret void +} + +define void @st1b_s( %data, %pred, i8* %addr) { +; CHECK-LABEL: st1b_s: +; CHECK: st1b { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv4i8( %trunc, + %pred, + i8* %addr) + ret void +} + +define void @st1b_d( %data, %pred, i8* %addr) { +; CHECK-LABEL: st1b_d: +; CHECK: st1b { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i8( %trunc, + %pred, + i8* %addr) + ret void +} + +define void @st1b_upper_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_upper_bound: +; CHECK: st1b { z0.b }, p0, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_inbound: +; CHECK: st1b { z0.b }, p0, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_lower_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_lower_bound: +; CHECK: st1b { z0.b }, p0, [x0, #-8, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -8 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_out_of_upper_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_out_of_upper_bound: +; CHECK: rdvl x[[OFFSET:[0-9]+]], #8 +; CHECK: add x[[BASE:[0-9]+]], x0, x[[OFFSET]] +; CHECK: st1b { z0.b }, p0, [x[[BASE]]] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 8 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_out_of_lower_bound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_out_of_lower_bound: +; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9 +; CHECK: add x[[BASE:[0-9]+]], x0, x[[OFFSET]] +; CHECK: st1b { z0.b }, p0, [x[[BASE]]] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -9 + %base_scalar = bitcast * %base to i8* + call void @llvm.aarch64.sve.st1.nxv16i8( %data, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_s_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_s_inbound: +; CHECK: st1b { z0.s }, p0, [x0, #7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 7 + %base_scalar = bitcast * %base to i8* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv4i8( %trunc, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_h_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_h_inbound: +; CHECK: st1b { z0.h }, p0, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i8* + 
%trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv8i8( %trunc, %pg, i8* %base_scalar) + ret void +} + +define void @st1b_d_inbound( %data, %pg, i8* %a) { +; CHECK-LABEL: st1b_d_inbound: +; CHECK: st1b { z0.d }, p0, [x0, #-7, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i8* %a to * + %base = getelementptr , * %base_scalable, i64 -7 + %base_scalar = bitcast * %base to i8* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i8( %trunc, %pg, i8* %base_scalar) + ret void +} + +; +; ST1H +; + +define void @st1h_i16( %data, %pred, i16* %addr) { +; CHECK-LABEL: st1h_i16: +; CHECK: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.st1.nxv8i16( %data, + %pred, + i16* %addr) + ret void +} + +define void @st1h_f16( %data, %pred, half* %addr) { +; CHECK-LABEL: st1h_f16: +; CHECK: st1h { z0.h }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.st1.nxv8f16( %data, + %pred, + half* %addr) + ret void +} + +define void @st1h_s( %data, %pred, i16* %addr) { +; CHECK-LABEL: st1h_s: +; CHECK: st1h { z0.s }, p0, [x0] +; CHECK-NEXT: ret + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv4i16( %trunc, + %pred, + i16* %addr) + ret void +} + +define void @st1h_d( %data, %pred, i16* %addr) { +; CHECK-LABEL: st1h_d: +; CHECK: st1h { z0.d }, p0, [x0] +; CHECK-NEXT: ret + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i16( %trunc, + %pred, + i16* %addr) + ret void +} + +define void @st1h_inbound( %data, %pg, i16* %a) { +; CHECK-LABEL: st1h_inbound: +; CHECK: st1h { z0.h }, p0, [x0, #-1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 -1 + %base_scalar = bitcast * %base to i16* + call void @llvm.aarch64.sve.st1.nxv8i16( %data, %pg, i16* %base_scalar) + ret void +} + +define void @st1h_f16_inbound( %data, %pg, half* %a) { +; CHECK-LABEL: st1h_f16_inbound: +; CHECK: st1h { z0.h }, p0, [x0, #-5, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast half* %a to * + %base = getelementptr , * %base_scalable, i64 -5 + %base_scalar = bitcast * %base to half* + call void @llvm.aarch64.sve.st1.nxv8f16( %data, %pg, half* %base_scalar) + ret void +} + +define void @st1h_s_inbound( %data, %pg, i16* %a) { +; CHECK-LABEL: st1h_s_inbound: +; CHECK: st1h { z0.s }, p0, [x0, #2, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 2 + %base_scalar = bitcast * %base to i16* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv4i16( %trunc, %pg, i16* %base_scalar) + ret void +} + +define void @st1h_d_inbound( %data, %pg, i16* %a) { +; CHECK-LABEL: st1h_d_inbound: +; CHECK: st1h { z0.d }, p0, [x0, #-4, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i16* %a to * + %base = getelementptr , * %base_scalable, i64 -4 + %base_scalar = bitcast * %base to i16* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i16( %trunc, %pg, i16* %base_scalar) + ret void +} + +; +; ST1W +; + +define void @st1w_i32( %data, %pred, i32* %addr) { +; CHECK-LABEL: st1w_i32: +; CHECK: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.st1.nxv4i32( %data, + %pred, + i32* %addr) + ret void +} + +define void @st1w_f32( %data, %pred, float* %addr) { +; CHECK-LABEL: st1w_f32: +; CHECK: st1w { z0.s }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.st1.nxv4f32( %data, + %pred, + float* %addr) + ret void +} + +define void @st1w_d( %data, %pred, i32* %addr) { +; CHECK-LABEL: st1w_d: +; CHECK: st1w { 
z0.d }, p0, [x0] +; CHECK-NEXT: ret + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i32( %trunc, + %pred, + i32* %addr) + ret void +} + +define void @st1w_inbound( %data, %pg, i32* %a) { +; CHECK-LABEL: st1w_inbound: +; CHECK: st1w { z0.s }, p0, [x0, #6, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 6 + %base_scalar = bitcast * %base to i32* + call void @llvm.aarch64.sve.st1.nxv4i32( %data, %pg, i32* %base_scalar) + ret void +} + +define void @st1w_f32_inbound( %data, %pg, float* %a) { +; CHECK-LABEL: st1w_f32_inbound: +; CHECK: st1w { z0.s }, p0, [x0, #-1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast float* %a to * + %base = getelementptr , * %base_scalable, i64 -1 + %base_scalar = bitcast * %base to float* + call void @llvm.aarch64.sve.st1.nxv4f32( %data, %pg, float* %base_scalar) + ret void +} + +define void @st1w_d_inbound( %data, %pg, i32* %a) { +; CHECK-LABEL: st1w_d_inbound: +; CHECK: st1w { z0.d }, p0, [x0, #1, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i32* %a to * + %base = getelementptr , * %base_scalable, i64 1 + %base_scalar = bitcast * %base to i32* + %trunc = trunc %data to + call void @llvm.aarch64.sve.st1.nxv2i32( %trunc, %pg, i32* %base_scalar) + ret void +} + +; +; ST1D +; + +define void @st1d_i64( %data, %pred, i64* %addr) { +; CHECK-LABEL: st1d_i64: +; CHECK: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.st1.nxv2i64( %data, + %pred, + i64* %addr) + ret void +} + +define void @st1d_f64( %data, %pred, double* %addr) { +; CHECK-LABEL: st1d_f64: +; CHECK: st1d { z0.d }, p0, [x0] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.st1.nxv2f64( %data, + %pred, + double* %addr) + ret void +} + +define void @st1d_inbound( %data, %pg, i64* %a) { +; CHECK-LABEL: st1d_inbound: +; CHECK: st1d { z0.d }, p0, [x0, #5, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast i64* %a to * + %base = getelementptr , * %base_scalable, i64 5 + %base_scalar = bitcast * %base to i64* + call void @llvm.aarch64.sve.st1.nxv2i64( %data, %pg, i64* %base_scalar) + ret void +} + +define void @st1d_f64_inbound( %data, %pg, double* %a) { +; CHECK-LABEL: st1d_f64_inbound: +; CHECK: st1d { z0.d }, p0, [x0, #-8, mul vl] +; CHECK-NEXT: ret + %base_scalable = bitcast double* %a to * + %base = getelementptr , * %base_scalable, i64 -8 + %base_scalar = bitcast * %base to double* + call void @llvm.aarch64.sve.st1.nxv2f64( %data, %pg, double* %base_scalar) + ret void +} + +declare void @llvm.aarch64.sve.st1.nxv16i8(, , i8*) + +declare void @llvm.aarch64.sve.st1.nxv8i8(, , i8*) +declare void @llvm.aarch64.sve.st1.nxv8i16(, , i16*) +declare void @llvm.aarch64.sve.st1.nxv8f16(, , half*) + +declare void @llvm.aarch64.sve.st1.nxv4i8(, , i8*) +declare void @llvm.aarch64.sve.st1.nxv4i16(, , i16*) +declare void @llvm.aarch64.sve.st1.nxv4i32(, , i32*) +declare void @llvm.aarch64.sve.st1.nxv4f32(, , float*) + +declare void @llvm.aarch64.sve.st1.nxv2i8(, , i8*) +declare void @llvm.aarch64.sve.st1.nxv2i16(, , i16*) +declare void @llvm.aarch64.sve.st1.nxv2i32(, , i32*) +declare void @llvm.aarch64.sve.st1.nxv2i64(, , i64*) +declare void @llvm.aarch64.sve.st1.nxv2f64(, , double*)
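
Note for reviewers (not part of the patch): the new custom nodes carry their operands in the order built by performLD1Combine and performST1Combine above, namely (Chain, Pg, Base, MemVT) for the contiguous loads (LD1, and likewise the sign-extending LD1S, LDNF1 and LDFF1 variants) and (Chain, Data, Base, Pg, MemVT) for ST1, which is why getMemVTFromNode reads operand 3 for the loads and operand 4 for the store. The snippet below is only an illustrative sketch of how a consumer inside the backend recovers the memory VT from those positions; the helper name getContiguousMemVT is hypothetical.

static EVT getContiguousMemVT(const SDNode *Root) {
  // Sketch only: the load nodes keep their memory VT as operand 3; the ST1
  // node carries its data operand first, so its memory VT sits at operand 4.
  unsigned MemVTOpNum = Root->getOpcode() == AArch64ISD::ST1 ? 4 : 3;
  return cast<VTSDNode>(Root->getOperand(MemVTOpNum))->getVT();
}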