diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -261,7 +261,14 @@
   void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
-  void SelectPredicatedStore(SDNode *N, unsigned NumVecs, const unsigned Opc);
+  template <unsigned Scale>
+  void SelectPredicatedStore(SDNode *N, unsigned NumVecs, const unsigned Opc_rr,
+                             const unsigned Opc_ri);
+  template <unsigned Scale>
+  std::tuple<unsigned, SDValue, SDValue>
+  findAddrModeSVELoadStore(SDNode *N, const unsigned Opc_rr,
+                           const unsigned Opc_ri, const SDValue &OldBase,
+                           const SDValue &OldOffset);
 
   bool tryBitfieldExtractOp(SDNode *N);
   bool tryBitfieldExtractOpFromSExt(SDNode *N);
@@ -1408,6 +1415,30 @@
   CurDAG->RemoveDeadNode(N);
 }
 
+/// Optimize \param OldBase and \param OldOffset selecting the best addressing
+/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
+/// new Base and an SDValue representing the new offset.
+template <unsigned Scale>
+std::tuple<unsigned, SDValue, SDValue>
+AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, const unsigned Opc_rr,
+                                              const unsigned Opc_ri,
+                                              const SDValue &OldBase,
+                                              const SDValue &OldOffset) {
+  SDValue NewBase = OldBase;
+  SDValue NewOffset = OldOffset;
+  // Detect a possible Reg+Imm addressing mode.
+  const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
+      N, OldBase, NewBase, NewOffset);
+
+  // Detect a possible reg+reg addressing mode, but only if we haven't already
+  // detected a Reg+Imm one.
+  const bool IsRegReg =
+      !IsRegImm && SelectSVERegRegAddrMode<Scale>(OldBase, NewBase, NewOffset);
+
+  // Select the instruction.
+  return {IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset};
+}
+
 void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                       unsigned Opc) {
   SDLoc dl(N);
@@ -1428,18 +1459,27 @@
   ReplaceNode(N, St);
 }
 
+template <unsigned Scale>
 void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
-                                                const unsigned Opc) {
+                                                const unsigned Opc_rr,
+                                                const unsigned Opc_ri) {
   SDLoc dl(N);
 
   // Form a REG_SEQUENCE to force register allocation.
   SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
   SDValue RegSeq = createZTuple(Regs);
 
-  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2),         // predicate
-                   N->getOperand(NumVecs + 3),                 // address
-                   CurDAG->getTargetConstant(0, dl, MVT::i64), // offset
-                   N->getOperand(0)};                          // chain
+  // Optimize addressing mode.
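+  // findAddrModeSVELoadStore returns the opcode to use together with the
+  // rewritten operands: Opc_rr plus a register offset when a reg+reg mode
+  // is matched, otherwise Opc_ri plus an immediate offset (zero when
+  // nothing could be folded).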
+  unsigned Opc;
+  SDValue Offset, Base;
+  std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore<Scale>(
+      N, Opc_rr, Opc_ri, N->getOperand(NumVecs + 3),
+      CurDAG->getTargetConstant(0, dl, MVT::i64));
+
+  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), // predicate
+                   Base,                               // address
+                   Offset,                             // offset
+                   N->getOperand(0)};                  // chain
   SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
 
   ReplaceNode(N, St);
@@ -3910,48 +3950,60 @@
   }
   case Intrinsic::aarch64_sve_st2: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedStore(Node, 2, AArch64::ST2B_IMM);
+      SelectPredicatedStore</*Scale=*/0>(Node, 2, AArch64::ST2B,
+                                         AArch64::ST2B_IMM);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-      SelectPredicatedStore(Node, 2, AArch64::ST2H_IMM);
+      SelectPredicatedStore</*Scale=*/1>(Node, 2, AArch64::ST2H,
+                                         AArch64::ST2H_IMM);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedStore(Node, 2, AArch64::ST2W_IMM);
+      SelectPredicatedStore</*Scale=*/2>(Node, 2, AArch64::ST2W,
+                                         AArch64::ST2W_IMM);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedStore(Node, 2, AArch64::ST2D_IMM);
+      SelectPredicatedStore</*Scale=*/3>(Node, 2, AArch64::ST2D,
+                                         AArch64::ST2D_IMM);
       return;
     }
     break;
   }
   case Intrinsic::aarch64_sve_st3: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedStore(Node, 3, AArch64::ST3B_IMM);
+      SelectPredicatedStore</*Scale=*/0>(Node, 3, AArch64::ST3B,
+                                         AArch64::ST3B_IMM);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-      SelectPredicatedStore(Node, 3, AArch64::ST3H_IMM);
+      SelectPredicatedStore</*Scale=*/1>(Node, 3, AArch64::ST3H,
+                                         AArch64::ST3H_IMM);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedStore(Node, 3, AArch64::ST3W_IMM);
+      SelectPredicatedStore</*Scale=*/2>(Node, 3, AArch64::ST3W,
+                                         AArch64::ST3W_IMM);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedStore(Node, 3, AArch64::ST3D_IMM);
+      SelectPredicatedStore</*Scale=*/3>(Node, 3, AArch64::ST3D,
+                                         AArch64::ST3D_IMM);
       return;
     }
     break;
   }
   case Intrinsic::aarch64_sve_st4: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedStore(Node, 4, AArch64::ST4B_IMM);
+      SelectPredicatedStore</*Scale=*/0>(Node, 4, AArch64::ST4B,
+                                         AArch64::ST4B_IMM);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-      SelectPredicatedStore(Node, 4, AArch64::ST4H_IMM);
+      SelectPredicatedStore</*Scale=*/1>(Node, 4, AArch64::ST4H,
+                                         AArch64::ST4H_IMM);
      return;
    } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedStore(Node, 4, AArch64::ST4W_IMM);
+      SelectPredicatedStore</*Scale=*/2>(Node, 4, AArch64::ST4W,
+                                         AArch64::ST4W_IMM);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedStore(Node, 4, AArch64::ST4D_IMM);
+      SelectPredicatedStore</*Scale=*/3>(Node, 4, AArch64::ST4D,
+                                         AArch64::ST4D_IMM);
       return;
     }
     break;
@@ -4587,6 +4639,9 @@
   if (isa<MemSDNode>(Root))
     return cast<MemSDNode>(Root)->getMemoryVT();
 
+  if (isa<MemIntrinsicSDNode>(Root))
+    return cast<MemIntrinsicSDNode>(Root)->getMemoryVT();
+
   const unsigned Opcode = Root->getOpcode();
   // For custom ISD nodes, we have to look at them individually to extract the
   // type of the data moved to/from memory.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8900,6 +8900,30 @@
                      DL, VT);
 }
 
+/// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
+template <unsigned NumVecs>
+static bool setInfoSVEStN(AArch64TargetLowering::IntrinsicInfo &Info,
+                          const CallInst &CI) {
+  Info.opc = ISD::INTRINSIC_VOID;
+  // Retrieve EC from first vector argument.
+  const EVT VT = EVT::getEVT(CI.getArgOperand(0)->getType());
+  ElementCount EC = VT.getVectorElementCount();
+#ifndef NDEBUG
+  // Check the assumption that all input vectors are the same type.
+  for (unsigned I = 0; I < NumVecs; ++I)
+    assert(VT == EVT::getEVT(CI.getArgOperand(I)->getType()) &&
+           "Invalid type.");
+#endif
+  // memVT is `NumVecs * VT`.
+  Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
+                                EC * NumVecs);
+  Info.ptrVal = CI.getArgOperand(CI.getNumArgOperands() - 1);
+  Info.offset = 0;
+  Info.align.reset();
+  Info.flags = MachineMemOperand::MOStore;
+  return true;
+}
+
 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
 /// specified in the intrinsic calls.
@@ -8909,6 +8933,12 @@
                                                unsigned Intrinsic) const {
   auto &DL = I.getModule()->getDataLayout();
   switch (Intrinsic) {
+  case Intrinsic::aarch64_sve_st2:
+    return setInfoSVEStN<2>(Info, I);
+  case Intrinsic::aarch64_sve_st3:
+    return setInfoSVEStN<3>(Info, I);
+  case Intrinsic::aarch64_sve_st4:
+    return setInfoSVEStN<4>(Info, I);
   case Intrinsic::aarch64_neon_ld2:
   case Intrinsic::aarch64_neon_ld3:
   case Intrinsic::aarch64_neon_ld4:
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll
@@ -0,0 +1,613 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
+
+; NOTE: invalid, upper and lower bound immediate values of the reg+imm
+; addressing mode are checked only for the byte version of each
+; instruction (`st<N>b`), as the code for detecting the immediate is
+; common to all instructions, and varies only for the number of
+; elements of the structured store, which is <N> = 2, 3, 4.
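+;
+; For reference, the immediate bounds exercised below (derived from these
+; tests): the reg+imm form of st<N>{b,h,w,d} takes an immediate that is a
+; multiple of N in the range [-8*N, 7*N] (in units of `mul vl`), e.g.
+; [-16, 14] for st2b, [-24, 21] for st3b and [-32, 28] for st4b. Any other
+; offset falls back to the reg+reg form, with the offset materialized via
+; rdvl/mul.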
+
+;
+; ST2B
+;
+
+define void @st2b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st2b_i8_valid_imm:
+; CHECK: st2b { z0.b, z1.b }, p0, [x0, #2, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
+  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st2b_i8_invalid_imm_not_multiple_of_2(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st2b_i8_invalid_imm_not_multiple_of_2:
+; CHECK: rdvl x[[N:[0-9]+]], #3
+; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st2b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st2b_i8_invalid_imm_out_of_lower_bound:
+; CHECK: rdvl x[[N:[0-9]+]], #-18
+; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18
+  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st2b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st2b_i8_invalid_imm_out_of_upper_bound:
+; CHECK: rdvl x[[N:[0-9]+]], #16
+; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16
+  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st2b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st2b_i8_valid_imm_lower_bound:
+; CHECK: st2b { z0.b, z1.b }, p0, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
+  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st2b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st2b_i8_valid_imm_upper_bound:
+; CHECK: st2b { z0.b, z1.b }, p0, [x0, #14, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
+  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+;
+; ST2H
+;
+
+define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+; CHECK-LABEL: st2h_i16:
+; CHECK: st2h { z0.h, z1.h }, p0, [x0, #2, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 2
+  call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %base)
+  ret void
+}
+
+define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+; CHECK-LABEL: st2h_f16:
+; CHECK: st2h { z0.h, z1.h }, p0, [x0, #2, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 2
+  call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %base)
+  ret void
+}
+
+;
+; ST2W
+;
+
+define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+; CHECK-LABEL: st2w_i32:
+; CHECK: st2w { z0.s, z1.s }, p0, [x0, #4, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 4
+  call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %base)
+  ret void
+}
+
+define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+; CHECK-LABEL: st2w_f32:
+; CHECK: st2w { z0.s, z1.s }, p0, [x0, #6, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 6
+  call void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %base)
+  ret void
+}
+
+;
+; ST2D
+;
+
+define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+; CHECK-LABEL: st2d_i64:
+; CHECK: st2d { z0.d, z1.d }, p0, [x0, #8, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 8
+  call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %base)
+  ret void
+}
+
+define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+; CHECK-LABEL: st2d_f64:
+; CHECK: st2d { z0.d, z1.d }, p0, [x0, #10, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 10
+  call void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %base)
+  ret void
+}
+
+;
+; ST3B
+;
+
+define void @st3b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st3b_i8_valid_imm:
+; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st3b_i8_invalid_imm_not_multiple_of_3_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st3b_i8_invalid_imm_not_multiple_of_3_01:
+; CHECK: rdvl x[[N:[0-9]+]], #4
+; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st3b_i8_invalid_imm_not_multiple_of_3_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st3b_i8_invalid_imm_not_multiple_of_3_02:
+; CHECK: rdvl x[[N:[0-9]+]], #5
+; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st3b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st3b_i8_invalid_imm_out_of_lower_bound:
+; CHECK: rdvl x[[N:[0-9]+]], #-27
+; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st3b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st3b_i8_invalid_imm_out_of_upper_bound:
+; CHECK: rdvl x[[N:[0-9]+]], #24
+; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st3b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st3b_i8_valid_imm_lower_bound:
+; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #-24, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st3b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st3b_i8_valid_imm_upper_bound:
+; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #21, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+;
+; ST3H
+;
+
+define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+; CHECK-LABEL: st3h_i16:
+; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, #6, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 6
+  call void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %base)
+  ret void
+}
+
+define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+; CHECK-LABEL: st3h_f16:
+; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, #9, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 9
+  call void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %base)
+  ret void
+}
+
+;
+; ST3W
+;
+
+define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+; CHECK-LABEL: st3w_i32:
+; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, #12, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 12
+  call void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %base)
+  ret void
+}
+
+define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+; CHECK-LABEL: st3w_f32:
+; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, #15, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 15
+  call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %base)
+  ret void
+}
+
+;
+; ST3D
+;
+
+define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+; CHECK-LABEL: st3d_i64:
+; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, #18, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 18
+  call void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %base)
+  ret void
+}
+
+define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+; CHECK-LABEL: st3d_f64:
+; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, #-3, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -3
+  call void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %base)
+  ret void
+}
+
+;
+; ST4B
+;
+
+define void @st4b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_valid_imm:
+; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #4, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st4b_i8_invalid_imm_not_multiple_of_4_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_01:
+; CHECK: rdvl x[[N:[0-9]+]], #5
+; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st4b_i8_invalid_imm_not_multiple_of_4_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_02:
+; CHECK: rdvl x[[N:[0-9]+]], #6
+; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st4b_i8_invalid_imm_not_multiple_of_4_03(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_03:
+; CHECK: rdvl x[[N:[0-9]+]], #7
+; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st4b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_invalid_imm_out_of_lower_bound:
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #4) #-9)
+; xM = -9 * 2^6
+; xP = RDVL * 2^-4
+; xBASE = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
+; CHECK: rdvl x[[N:[0-9]+]], #1
+; CHECK-DAG: mov  x[[M:[0-9]+]], #-576
+; CHECK-DAG: lsr  x[[P:[0-9]+]], x[[N]], #4
+; CHECK-DAG: mul  x[[OFFSET:[0-9]+]], x[[P]], x[[M]]
+; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st4b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_invalid_imm_out_of_upper_bound:
+; FIXME: optimize OFFSET computation so that xOFFSET = (shl (RDVL #16) #1)
+; xM = 2^9
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
+; CHECK: rdvl x[[N:[0-9]+]], #1
+; CHECK-DAG: mov  w[[M:[0-9]+]], #512
+; CHECK-DAG: lsr  x[[P:[0-9]+]], x[[N]], #4
+; CHECK-DAG: mul  x[[OFFSET:[0-9]+]], x[[P]], x[[M]]
+; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st4b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_valid_imm_lower_bound:
+; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #-32, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+define void @st4b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+; CHECK-LABEL: st4b_i8_valid_imm_upper_bound:
+; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #28, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+;
+; ST4H
+;
+
+define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+; CHECK-LABEL: st4h_i16:
+; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, #8, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
+  call void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %base)
+  ret void
+}
+
+define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+; CHECK-LABEL: st4h_f16:
+; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, #12, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 12
+  call void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %base)
+  ret void
+}
+
+;
+; ST4W
+;
+
+define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+; CHECK-LABEL: st4w_i32:
+; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, #16, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 16
+  call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %base)
+  ret void
+}
+
+define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+; CHECK-LABEL: st4w_f32:
+; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, #20, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 20
+  call void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %base)
+  ret void
+}
+
+;
+; ST4D
+;
+
+define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+; CHECK-LABEL: st4d_i64:
+; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, #24, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 24
+  call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %base)
+  ret void
+}
+
+define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+; CHECK-LABEL: st4d_f64:
+; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, #28, mul vl]
+; CHECK-NEXT: ret
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 28
+  call void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %base)
+  ret void
+}
+
+declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
+declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
+declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
+declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
+declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
+declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
+declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
+
+declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
+declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
+declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
+declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
+declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
+declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
+declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
+
+declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
+declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
+declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
+declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
+declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
+declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
+declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll
@@ -0,0 +1,367 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
+
+;
+; ST2B
+;
+
+define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
+; CHECK-LABEL: st2b_i8:
+; CHECK: st2b { z0.b, z1.b }, p0, [x0, x1]
+; CHECK-NEXT: ret
+  %1 = getelementptr i8, i8* %addr, i64 %offset
+  %base = bitcast i8* %1 to <vscale x 16 x i8>*
+  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+;
+; ST2H
+;
+
+define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
+; CHECK-LABEL: st2h_i16:
+; CHECK: st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %1 = getelementptr i16, i16* %addr, i64 %offset
+  %base = bitcast i16* %1 to <vscale x 8 x i16>*
+  call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %base)
+  ret void
+}
+
+define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
+; CHECK-LABEL: st2h_f16:
+; CHECK: st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %1 = getelementptr half, half* %addr, i64 %offset
+  %base = bitcast half* %1 to <vscale x 8 x half>*
+  call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %base)
+  ret void
+}
+
+;
+; ST2W
+;
+
+define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
+; CHECK-LABEL: st2w_i32:
+; CHECK: st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %1 = getelementptr i32, i32* %addr, i64 %offset
+  %base = bitcast i32* %1 to <vscale x 4 x i32>*
+  call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %base)
+  ret void
+}
+
+define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
+; CHECK-LABEL: st2w_f32:
+; CHECK: st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %1 = getelementptr float, float* %addr, i64 %offset
+  %base = bitcast float* %1 to <vscale x 4 x float>*
+  call void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %base)
+  ret void
+}
+
+;
+; ST2D
+;
+
+define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
+; CHECK-LABEL: st2d_i64:
+; CHECK: st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %1 = getelementptr i64, i64* %addr, i64 %offset
+  %base = bitcast i64* %1 to <vscale x 2 x i64>*
+  call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %base)
+  ret void
+}
+
+define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
+; CHECK-LABEL: st2d_f64:
+; CHECK: st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %1 = getelementptr double, double* %addr, i64 %offset
+  %base = bitcast double* %1 to <vscale x 2 x double>*
+  call void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %base)
+  ret void
+}
+
+;
+; ST3B
+;
+
+define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
+; CHECK-LABEL: st3b_i8:
+; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, x1]
+; CHECK-NEXT: ret
+  %1 = getelementptr i8, i8* %addr, i64 %offset
+  %base = bitcast i8* %1 to <vscale x 16 x i8>*
+  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+;
+; ST3H
+;
+
+define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
+; CHECK-LABEL: st3h_i16:
+; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %1 = getelementptr i16, i16* %addr, i64 %offset
+  %base = bitcast i16* %1 to <vscale x 8 x i16>*
+  call void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %base)
+  ret void
+}
+
+define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
+; CHECK-LABEL: st3h_f16:
+; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %1 = getelementptr half, half* %addr, i64 %offset
+  %base = bitcast half* %1 to <vscale x 8 x half>*
+  call void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %base)
+  ret void
+}
+
+;
+; ST3W
+;
+
+define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
+; CHECK-LABEL: st3w_i32:
+; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %1 = getelementptr i32, i32* %addr, i64 %offset
+  %base = bitcast i32* %1 to <vscale x 4 x i32>*
+  call void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %base)
+  ret void
+}
+
+define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
+; CHECK-LABEL: st3w_f32:
+; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %1 = getelementptr float, float* %addr, i64 %offset
+  %base = bitcast float* %1 to <vscale x 4 x float>*
+  call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %base)
+  ret void
+}
+
+;
+; ST3D
+;
+
+define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
+; CHECK-LABEL: st3d_i64:
+; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %1 = getelementptr i64, i64* %addr, i64 %offset
+  %base = bitcast i64* %1 to <vscale x 2 x i64>*
+  call void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %base)
+  ret void
+}
+
+define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
+; CHECK-LABEL: st3d_f64:
+; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %1 = getelementptr double, double* %addr, i64 %offset
+  %base = bitcast double* %1 to <vscale x 2 x double>*
+  call void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %base)
+  ret void
+}
+
+;
+; ST4B
+;
+
+define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
+; CHECK-LABEL: st4b_i8:
+; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x1]
+; CHECK-NEXT: ret
+  %1 = getelementptr i8, i8* %addr, i64 %offset
+  %base = bitcast i8* %1 to <vscale x 16 x i8>*
+  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %base)
+  ret void
+}
+
+;
+; ST4H
+;
+
+define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
+; CHECK-LABEL: st4h_i16:
+; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %1 = getelementptr i16, i16* %addr, i64 %offset
+  %base = bitcast i16* %1 to <vscale x 8 x i16>*
+  call void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %base)
+  ret void
+}
+
+define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
+; CHECK-LABEL: st4h_f16:
+; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %1 = getelementptr half, half* %addr, i64 %offset
+  %base = bitcast half* %1 to <vscale x 8 x half>*
+  call void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %base)
+  ret void
+}
+
+;
+; ST4W
+;
+
+define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
+; CHECK-LABEL: st4w_i32:
+; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %1 = getelementptr i32, i32* %addr, i64 %offset
+  %base = bitcast i32* %1 to <vscale x 4 x i32>*
+  call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %base)
+  ret void
+}
+
+define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
+; CHECK-LABEL: st4w_f32:
+; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %1 = getelementptr float, float* %addr, i64 %offset
+  %base = bitcast float* %1 to <vscale x 4 x float>*
+  call void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %base)
+  ret void
+}
+
+;
+; ST4D
+;
+
+define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
+; CHECK-LABEL: st4d_i64:
+; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %1 = getelementptr i64, i64* %addr, i64 %offset
+  %base = bitcast i64* %1 to <vscale x 2 x i64>*
+  call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %base)
+  ret void
+}
+
+define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
+; CHECK-LABEL: st4d_f64:
+; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %1 = getelementptr double, double* %addr, i64 %offset
+  %base = bitcast double* %1 to <vscale x 2 x double>*
+  call void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %base)
+  ret void
+}
+
+declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
+declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
+declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
+declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
+declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
+declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
+declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
+
+declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
+declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
+declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
+declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
+declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
+declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
+declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
+
+declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
+declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
+declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
+declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
+declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
+declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
+declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)