Index: llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -245,8 +245,9 @@
                       unsigned SubRegIdx);
   void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
-  void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, const unsigned Opc,
-                            unsigned SubRegIdx);
+  template <unsigned Scale>
+  void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, const unsigned Opc_rr,
+                            const unsigned Opc_ri, unsigned SubRegIdx);

   bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
   /// SVE Reg+Imm addressing mode.
@@ -1435,16 +1436,24 @@
   return (IsRegReg) ? Opc_rr : Opc_ri;
 }

+template <unsigned Scale>
 void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
-                                               const unsigned Opc,
+                                               const unsigned Opc_rr,
+                                               const unsigned Opc_ri,
                                                unsigned SubRegIdx) {
   SDLoc dl(N);
   EVT VT = N->getValueType(0);
   SDValue Chain = N->getOperand(0);

+  // Optimize addressing mode.
+  SDValue Base = N->getOperand(2);
+  SDValue Offset = CurDAG->getTargetConstant(0, dl, MVT::i64);
+  const unsigned Opc =
+      findAddrModeSVELoadStore<Scale>(N, Opc_rr, Opc_ri, Base, Offset);
+
   SDValue Ops[] = {N->getOperand(1), // Predicate
-                   N->getOperand(2), // Memory operand
-                   CurDAG->getTargetConstant(0, dl, MVT::i64), Chain};
+                   Base,             // Memory operand
+                   Offset, Chain};

   const EVT ResTys[] = {MVT::Untyped, MVT::Other};

@@ -4621,48 +4630,60 @@
   }
   case AArch64ISD::SVE_LD2: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2B_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<0 /*Scale*/>(Node, 2, AArch64::LD2B,
+                                        AArch64::LD2B_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2H_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<1 /*Scale*/>(Node, 2, AArch64::LD2H,
+                                        AArch64::LD2H_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2W_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<2 /*Scale*/>(Node, 2, AArch64::LD2W,
+                                        AArch64::LD2W_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2D_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<3 /*Scale*/>(Node, 2, AArch64::LD2D,
+                                        AArch64::LD2D_IMM, AArch64::zsub0);
       return;
     }
     break;
   }
   case AArch64ISD::SVE_LD3: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3B_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<0 /*Scale*/>(Node, 3, AArch64::LD3B,
+                                        AArch64::LD3B_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3H_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<1 /*Scale*/>(Node, 3, AArch64::LD3H,
+                                        AArch64::LD3H_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3W_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<2 /*Scale*/>(Node, 3, AArch64::LD3W,
+                                        AArch64::LD3W_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3D_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<3 /*Scale*/>(Node, 3, AArch64::LD3D,
+                                        AArch64::LD3D_IMM, AArch64::zsub0);
       return;
     }
     break;
   }
   case AArch64ISD::SVE_LD4: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4B_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<0 /*Scale*/>(Node, 4, AArch64::LD4B,
+                                        AArch64::LD4B_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4H_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<1 /*Scale*/>(Node, 4, AArch64::LD4H,
+                                        AArch64::LD4H_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4W_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<2 /*Scale*/>(Node, 4, AArch64::LD4W,
+                                        AArch64::LD4W_IMM, AArch64::zsub0);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4D_IMM, AArch64::zsub0);
+      SelectPredicatedLoad<3 /*Scale*/>(Node, 4, AArch64::LD4D,
+                                        AArch64::LD4D_IMM, AArch64::zsub0);
       return;
     }
     break;
@@ -4685,6 +4706,7 @@
 }
 /// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. If the input
 /// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
 /// EVT.
+template <unsigned NumVec>
 static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT) {
   if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
     return EVT();
@@ -4695,7 +4717,8 @@
     return EVT();

   EVT ScalarVT = EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / NumElts);
-  EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, NumElts, /*IsScalable=*/true);
+  EVT MemVT =
+      EVT::getVectorVT(Ctx, ScalarVT, NumVec * NumElts, /*IsScalable=*/true);

   return MemVT;
 }
@@ -4715,6 +4738,15 @@
   case AArch64ISD::LDNF1:
   case AArch64ISD::LDNF1S:
     return cast<VTSDNode>(Root->getOperand(3))->getVT();
+  case AArch64ISD::SVE_LD2:
+    return getPackedVectorTypeFromPredicateType<2>(
+        Ctx, Root->getOperand(1)->getValueType(0));
+  case AArch64ISD::SVE_LD3:
+    return getPackedVectorTypeFromPredicateType<3>(
+        Ctx, Root->getOperand(1)->getValueType(0));
+  case AArch64ISD::SVE_LD4:
+    return getPackedVectorTypeFromPredicateType<4>(
+        Ctx, Root->getOperand(1)->getValueType(0));
   default:
     break;
   }
@@ -4729,7 +4761,7 @@

   // We are using an SVE prefetch intrinsic. Type must be inferred
   // from the width of the predicate.
-  return getPackedVectorTypeFromPredicateType(
+  return getPackedVectorTypeFromPredicateType<1>(
       Ctx, Root->getOperand(2)->getValueType(0));
 }
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
@@ -0,0 +1,420 @@
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; NOTE: invalid, upper and lower bound immediate values of the reg+imm
+; addressing mode are checked only for the byte version of each
+; instruction (`ld<N>b`), as the code for detecting the immediate is
+; common to all instructions, and varies only for the number of
+; elements of the structured load, which is <N> = 2, 3, 4.
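+;
+; As a worked sketch of the ranges exercised below (assuming the
+; scalar-plus-immediate form of ld<N>b encodes a signed 4-bit immediate
+; that is implicitly scaled by the number of vectors <N>), the reachable
+; offsets are the multiples of <N> in [-8 * <N>, 7 * <N>]:
+;   ld2b: -16 .. 14, in steps of 2
+;   ld3b: -24 .. 21, in steps of 3
+;   ld4b: -32 .. 28, in steps of 4
+; Offsets outside these ranges, or not a multiple of <N>, are expected to
+; fall back to the reg+reg form (rdvl/mul followed by ld<N>b [x0, x<m>]).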
+
+; ld2b
+define @ld2.nxv32i8( %gp, *%addr) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 2
+%res = call @llvm.aarch64.sve.ld2.nxv32i8( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv32i8_lower_bound( %gp, *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_lower_bound:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -16
+%res = call @llvm.aarch64.sve.ld2.nxv32i8( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv32i8_upper_bound( %gp, *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_upper_bound:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, #14, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 14
+%res = call @llvm.aarch64.sve.ld2.nxv32i8( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv32i8_not_multiple_of_2( %gp, *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_not_multiple_of_2:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #3
+; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 3
+%res = call @llvm.aarch64.sve.ld2.nxv32i8( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv32i8_outside_lower_bound( %gp, *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_lower_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #-18
+; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -18
+%res = call @llvm.aarch64.sve.ld2.nxv32i8( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv32i8_outside_upper_bound( %gp, *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_upper_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #16
+; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 16
+%res = call @llvm.aarch64.sve.ld2.nxv32i8( %gp, * %base)
+ret %res
+}
+
+; ld2h
+define @ld2.nxv16i16( %gp, * %addr) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, #12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 12
+%res = call @llvm.aarch64.sve.ld2.nxv16i16( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv16f16( %gp, * %addr) {
+; CHECK-LABEL: ld2.nxv16f16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, #12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 12
+%res = call @llvm.aarch64.sve.ld2.nxv16f16( %gp, * %base)
+ret %res
+}
+
+; ld2w
+define @ld2.nxv8i32( %gp, * %addr) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, #-12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -12
+%res = call @llvm.aarch64.sve.ld2.nxv8i32( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv8f32( %gp, * %addr) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, #-14, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -14
+%res = call @llvm.aarch64.sve.ld2.nxv8f32( %gp, * %base)
+ret %res
+}
+
+; ld2d
+define @ld2.nxv4i64( %gp, * %addr) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, #-14, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -14
+%res = call @llvm.aarch64.sve.ld2.nxv4i64( %gp, * %base)
+ret %res
+}
+
+define @ld2.nxv4f64( %gp, * %addr) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -16
+%res = call @llvm.aarch64.sve.ld2.nxv4f64( %gp, * %base)
+ret %res
+}
+
+; ld3b
+define @ld3.nxv48i8( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 3
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv48i8_lower_bound( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_lower_bound:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #-24, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -24
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv48i8_upper_bound( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_upper_bound:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #21, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 21
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv48i8_not_multiple_of_3_01( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_01:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #4
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 4
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv48i8_not_multiple_of_3_02( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_02:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #5
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 5
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv48i8_outside_lower_bound( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_lower_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #-27
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -27
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv48i8_outside_upper_bound( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_upper_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #24
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 24
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %base)
+ret %res
+}
+
+; ld3h
+define @ld3.nxv24i16( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 6
+%res = call @llvm.aarch64.sve.ld3.nxv24i16( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv24f16( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv24f16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 6
+%res = call @llvm.aarch64.sve.ld3.nxv24f16( %gp, * %base)
+ret %res
+}
+
+; ld3w
+define @ld3.nxv12i32( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, #9, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 9
+%res = call @llvm.aarch64.sve.ld3.nxv12i32( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv12f32( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, #-9, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -9
+%res = call @llvm.aarch64.sve.ld3.nxv12f32( %gp, * %base)
+ret %res
+}
+
+; ld3d
+define @ld3.nxv6i64( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, #12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 12
+%res = call @llvm.aarch64.sve.ld3.nxv6i64( %gp, * %base)
+ret %res
+}
+
+define @ld3.nxv6f64( %gp, *%addr) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, #-12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -12
+%res = call @llvm.aarch64.sve.ld3.nxv6f64( %gp, * %base)
+ret %res
+}
+
+; ld4b
+define @ld4.nxv64i8( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 4
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv64i8_lower_bound( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_lower_bound:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #-32, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -32
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv64i8_upper_bound( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_upper_bound:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #28, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 28
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv64i8_not_multiple_of_4_01( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_01:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #5
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 5
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv64i8_not_multiple_of_4_02( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_02:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #6
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 6
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv64i8_not_multiple_of_4_03( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_03:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #7
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 7
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv64i8_outside_lower_bound( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_lower_bound:
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #4) #9)
+; xM = -9 * 2^6
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
+; CHECK: rdvl x[[N:[0-9]+]], #1
+; CHECK-DAG: mov x[[M:[0-9]+]], #-576
+; CHECK-DAG: lsr x[[P:[0-9]+]], x[[N]], #4
+; CHECK-DAG: mul x[[OFFSET:[0-9]+]], x[[P]], x[[M]]
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -36
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv64i8_outside_upper_bound( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_upper_bound:
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #16) #2)
+; xM = 2^9
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
+; CHECK: rdvl x[[N:[0-9]+]], #1
+; CHECK-DAG: mov w[[M:[0-9]+]], #512
+; CHECK-DAG: lsr x[[P:[0-9]+]], x[[N]], #4
+; CHECK-DAG: mul x[[OFFSET:[0-9]+]], x[[P]], x[[M]]
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 32
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %base)
+ret %res
+}
+
+; ld4h
+define @ld4.nxv32i16( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #8, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 8
+%res = call @llvm.aarch64.sve.ld4.nxv32i16( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv32f16( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv32f16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #8, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 8
+%res = call @llvm.aarch64.sve.ld4.nxv32f16( %gp, * %base)
+ret %res
+}
+
+; ld4w
+define @ld4.nxv16i32( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, #12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 12
+%res = call @llvm.aarch64.sve.ld4.nxv16i32( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv16f32( %gp, * %addr) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, #-12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -12
+%res = call @llvm.aarch64.sve.ld4.nxv16f32( %gp, * %base)
+ret %res
+}
+
+; ld4d
+define @ld4.nxv8i64( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, #16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 16
+%res = call @llvm.aarch64.sve.ld4.nxv8i64( %gp, * %base)
+ret %res
+}
+
+define @ld4.nxv8f64( %gp, *%addr) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr , * %addr, i64 -16
+%res = call @llvm.aarch64.sve.ld4.nxv8f64( %gp, * %base)
+ret %res
+}
+
+declare @llvm.aarch64.sve.ld2.nxv32i8(, *)
+declare @llvm.aarch64.sve.ld2.nxv16i16(, *)
+declare @llvm.aarch64.sve.ld2.nxv16f16(, *)
+declare @llvm.aarch64.sve.ld2.nxv8i32(, *)
+declare @llvm.aarch64.sve.ld2.nxv4i64(, *)
+declare @llvm.aarch64.sve.ld2.nxv8f32(, *)
+declare @llvm.aarch64.sve.ld2.nxv4f64(, *)
+
+declare @llvm.aarch64.sve.ld3.nxv48i8(, *)
+declare @llvm.aarch64.sve.ld3.nxv24i16(, *)
+declare @llvm.aarch64.sve.ld3.nxv24f16(, *)
+declare @llvm.aarch64.sve.ld3.nxv12i32(, *)
+declare @llvm.aarch64.sve.ld3.nxv6i64(, *)
+declare @llvm.aarch64.sve.ld3.nxv12f32(, *)
+declare @llvm.aarch64.sve.ld3.nxv6f64(, *)
+
+declare @llvm.aarch64.sve.ld4.nxv64i8(, *)
+declare @llvm.aarch64.sve.ld4.nxv32i16(, *)
+declare @llvm.aarch64.sve.ld4.nxv32f16(, *)
+declare @llvm.aarch64.sve.ld4.nxv16i32(, *)
+declare @llvm.aarch64.sve.ld4.nxv8i64(, *)
+declare @llvm.aarch64.sve.ld4.nxv16f32(, *)
+declare @llvm.aarch64.sve.ld4.nxv8f64(, *)
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll
@@ -0,0 +1,214 @@
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; ld2b
+define @ld2.nxv32i8( %gp, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i8, i8 * %addr, i64 %a
+%addr3 = bitcast i8* %addr2 to *
+%res = call @llvm.aarch64.sve.ld2.nxv32i8( %gp, * %addr3)
+ret %res
+}
+
+; ld2h
+define @ld2.nxv16i16( %gp, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i16, i16 * %addr, i64 %a
+%addr3 = bitcast i16* %addr2 to *
+%res = call @llvm.aarch64.sve.ld2.nxv16i16( %gp, * %addr3)
+ret %res
+}
+
+; ld2w
+define @ld2.nxv8i32( %gp, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i32, i32 * %addr, i64 %a
+%addr3 = bitcast i32* %addr2 to *
+%res = call @llvm.aarch64.sve.ld2.nxv8i32( %gp, * %addr3)
+ret %res
+}
+
+define @ld2.nxv8f32( %gp, float *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr float, float * %addr, i64 %a
+%addr3 = bitcast float* %addr2 to *
+%res = call @llvm.aarch64.sve.ld2.nxv8f32( %gp, * %addr3)
+ret %res
+}
+
+; ld2d
+define @ld2.nxv4i64( %gp, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i64, i64 * %addr, i64 %a
+%addr3 = bitcast i64* %addr2 to *
+%res = call @llvm.aarch64.sve.ld2.nxv4i64( %gp, * %addr3)
+ret %res
+}
+
+define @ld2.nxv4f64( %gp, double *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr double, double * %addr, i64 %a
+%addr3 = bitcast double* %addr2 to *
+%res = call @llvm.aarch64.sve.ld2.nxv4f64( %gp, * %addr3)
+ret %res
+}
+
+; ld3b
+define @ld3.nxv48i8( %gp, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i8, i8 * %addr, i64 %a
+%addr3 = bitcast i8* %addr2 to *
+%res = call @llvm.aarch64.sve.ld3.nxv48i8( %gp, * %addr3)
+ret %res
+}
+
+; ld3h
+define @ld3.nxv24i16( %gp, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i16, i16 * %addr, i64 %a
+%addr3 = bitcast i16* %addr2 to *
+%res = call @llvm.aarch64.sve.ld3.nxv24i16( %gp, * %addr3)
+ret %res
+}
+
+; ld3w
+define @ld3.nxv12i32( %gp, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i32, i32 * %addr, i64 %a
+%addr3 = bitcast i32* %addr2 to *
+%res = call @llvm.aarch64.sve.ld3.nxv12i32( %gp, * %addr3)
+ret %res
+}
+
+define @ld3.nxv12f32( %gp, float *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr float, float * %addr, i64 %a
+%addr3 = bitcast float* %addr2 to *
+%res = call @llvm.aarch64.sve.ld3.nxv12f32( %gp, * %addr3)
+ret %res
+}
+
+; ld3d
+define @ld3.nxv6i64( %gp, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i64, i64 * %addr, i64 %a
+%addr3 = bitcast i64* %addr2 to *
+%res = call @llvm.aarch64.sve.ld3.nxv6i64( %gp, * %addr3)
+ret %res
+}
+
+define @ld3.nxv6f64( %gp, double *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr double, double * %addr, i64 %a
+%addr3 = bitcast double* %addr2 to *
+%res = call @llvm.aarch64.sve.ld3.nxv6f64( %gp, * %addr3)
+ret %res
+}
+
+; ld4b
+define @ld4.nxv64i8( %gp, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i8, i8 * %addr, i64 %a
+%addr3 = bitcast i8* %addr2 to *
+%res = call @llvm.aarch64.sve.ld4.nxv64i8( %gp, * %addr3)
+ret %res
+}
+
+; ld4h
+define @ld4.nxv32i16( %gp, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i16, i16 * %addr, i64 %a
+%addr3 = bitcast i16* %addr2 to *
+%res = call @llvm.aarch64.sve.ld4.nxv32i16( %gp, * %addr3)
+ret %res
+}
+
+; ld4w
+define @ld4.nxv16i32( %gp, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i32, i32 * %addr, i64 %a
+%addr3 = bitcast i32* %addr2 to *
+%res = call @llvm.aarch64.sve.ld4.nxv16i32( %gp, * %addr3)
+ret %res
+}
+
+define @ld4.nxv16f32( %gp, float *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr float, float * %addr, i64 %a
+%addr3 = bitcast float* %addr2 to *
+%res = call @llvm.aarch64.sve.ld4.nxv16f32( %gp, * %addr3)
+ret %res
+}
+
+; ld4d
+define @ld4.nxv8i64( %gp, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i64, i64 * %addr, i64 %a
+%addr3 = bitcast i64* %addr2 to *
+%res = call @llvm.aarch64.sve.ld4.nxv8i64( %gp, * %addr3)
+ret %res
+}
+
+define @ld4.nxv8f64( %gp, double *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr double, double * %addr, i64 %a
+%addr3 = bitcast double* %addr2 to *
+%res = call @llvm.aarch64.sve.ld4.nxv8f64( %gp, * %addr3)
+ret %res
+}
+
+declare @llvm.aarch64.sve.ld2.nxv32i8(, *)
+declare @llvm.aarch64.sve.ld2.nxv16i16(, *)
+declare @llvm.aarch64.sve.ld2.nxv8i32(, *)
+declare @llvm.aarch64.sve.ld2.nxv4i64(, *)
+declare @llvm.aarch64.sve.ld2.nxv8f32(, *)
+declare @llvm.aarch64.sve.ld2.nxv4f64(, *)
+
+declare @llvm.aarch64.sve.ld3.nxv48i8(, *)
+declare @llvm.aarch64.sve.ld3.nxv24i16(, *)
+declare @llvm.aarch64.sve.ld3.nxv12i32(, *)
+declare @llvm.aarch64.sve.ld3.nxv6i64(, *)
+declare @llvm.aarch64.sve.ld3.nxv12f32(, *)
+declare @llvm.aarch64.sve.ld3.nxv6f64(, *)
+
+declare @llvm.aarch64.sve.ld4.nxv64i8(, *)
+declare @llvm.aarch64.sve.ld4.nxv32i16(, *)
+declare @llvm.aarch64.sve.ld4.nxv16i32(, *)
+declare @llvm.aarch64.sve.ld4.nxv8i64(, *)
+declare @llvm.aarch64.sve.ld4.nxv16f32(, *)
+declare @llvm.aarch64.sve.ld4.nxv8f64(, *)