diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -962,6 +962,24 @@
                  LLVMPointerToElt<0>],
                 [IntrReadMem, IntrArgMemOnly]>;
 
+  class AdvSIMD_2Vec_PredLoad_Intrinsic
+    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                             LLVMPointerToElt<0>],
+                            [IntrReadMem, IntrArgMemOnly]>;
+
+  class AdvSIMD_3Vec_PredLoad_Intrinsic
+    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                             LLVMPointerToElt<0>],
+                            [IntrReadMem, IntrArgMemOnly]>;
+
+  class AdvSIMD_4Vec_PredLoad_Intrinsic
+    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+                            [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                             LLVMPointerToElt<0>],
+                            [IntrReadMem, IntrArgMemOnly]>;
+
   class AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                             [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1535,6 +1553,10 @@
 def int_aarch64_sve_ld3 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
 def int_aarch64_sve_ld4 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
 
+def int_aarch64_sve_ld2_sret : AdvSIMD_2Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld3_sret : AdvSIMD_3Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld4_sret : AdvSIMD_4Vec_PredLoad_Intrinsic;
+
 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;
 def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -286,7 +286,8 @@
   void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
-                            unsigned Opc_rr, unsigned Opc_ri);
+                            unsigned Opc_rr, unsigned Opc_ri,
+                            bool IsIntr = false);
 
   bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
   /// SVE Reg+Imm addressing mode.
@@ -1487,7 +1488,7 @@
 
 void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
                                                unsigned Scale, unsigned Opc_ri,
-                                               unsigned Opc_rr) {
+                                               unsigned Opc_rr, bool IsIntr) {
   assert(Scale < 4 && "Invalid scaling value.");
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
@@ -1497,11 +1498,11 @@
   SDValue Base, Offset;
   unsigned Opc;
   std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
-      N, Opc_rr, Opc_ri, N->getOperand(2),
+      N, Opc_rr, Opc_ri, N->getOperand(IsIntr ? 3 : 2),
       CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);
 
-  SDValue Ops[] = {N->getOperand(1), // Predicate
-                   Base,             // Memory operand
+  SDValue Ops[] = {N->getOperand(IsIntr ? 2 : 1), // Predicate
+                   Base,                          // Memory operand
                    Offset, Chain};
 
   const EVT ResTys[] = {MVT::Untyped, MVT::Other};
@@ -3894,6 +3895,69 @@
     case Intrinsic::aarch64_ld64b:
       SelectLoad(Node, 8, AArch64::LD64B, AArch64::x8sub_0);
       return;
+    case Intrinsic::aarch64_sve_ld2_sret: {
+      if (VT == MVT::nxv16i8) {
+        SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B,
+                             true);
+        return;
+      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
+                 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
+        SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H,
+                             true);
+        return;
+      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
+        SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W,
+                             true);
+        return;
+      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
+        SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D,
+                             true);
+        return;
+      }
+      break;
+    }
+    case Intrinsic::aarch64_sve_ld3_sret: {
+      if (VT == MVT::nxv16i8) {
+        SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B,
+                             true);
+        return;
+      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
+                 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
+        SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H,
+                             true);
+        return;
+      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
+        SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W,
+                             true);
+        return;
+      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
+        SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D,
+                             true);
+        return;
+      }
+      break;
+    }
+    case Intrinsic::aarch64_sve_ld4_sret: {
+      if (VT == MVT::nxv16i8) {
+        SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B,
+                             true);
+        return;
+      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
+                 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
+        SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H,
+                             true);
+        return;
+      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
+        SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W,
+                             true);
+        return;
+      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
+        SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D,
+                             true);
+        return;
+      }
+      break;
+    }
     }
   } break;
   case ISD::INTRINSIC_WO_CHAIN: {
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
@@ -0,0 +1,568 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; NOTE: invalid, upper and lower bound immediate values of the reg+imm
+; addressing mode are checked only for the byte version of each
+; instruction (`ld<N>b`), as the code for detecting the immediate is
+; common to all instructions, and varies only for the number of
+; elements of the structure load, which is <N> = 2, 3, 4.
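+
+; NOTE (editorial, not one of the autogenerated tests): a result of the
+; new sret form is a struct of vectors that is unpacked with
+; extractvalue, using the same intrinsic declared at the end of this
+; file; %pg and %ptr below are placeholder values, e.g.
+;
+;   %s  = call { <vscale x 16 x i8>, <vscale x 16 x i8> }
+;         @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %pg, i8* %ptr)
+;   %v0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %s, 0
+;   %v1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %s, 1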
+
+; ld2b
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #2
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-16
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #14
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_not_multiple_of_2(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_not_multiple_of_2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #3
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-18
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #16
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld2h
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> * %addr) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #14
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 14
+%base_ptr = bitcast <vscale x 8 x i16>* %base to i16*
+%res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> * %addr) {
+; CHECK-LABEL: ld2.nxv16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-16
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 8 x half>* %base to half*
+%res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> * %addr) #0 {
+; CHECK-LABEL: ld2.nxv16bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #12
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 12
+%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat*
+%res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld2w
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> * %addr) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #14
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 14
+%base_ptr = bitcast <vscale x 4 x i32>* %base to i32*
+%res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> * %addr) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-16
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 4 x float>* %base to float*
+%res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld2d
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> * %addr) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #14
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 14
+%base_ptr = bitcast <vscale x 2 x i64>* %base to i64*
+%res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> * %addr) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-16
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 2 x double>* %base to double*
+%res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld3b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #3
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-24
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #21
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_01:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #4
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_02:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #5
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-27
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #24
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld3h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 21
+%base_ptr = bitcast <vscale x 8 x i16>* %base to i16*
+%res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+; CHECK-LABEL: ld3.nxv24f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 21
+%base_ptr = bitcast <vscale x 8 x half>* %base to half*
+%res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+; CHECK-LABEL: ld3.nxv24bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-24
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat*
+%res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld3w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 21
+%base_ptr = bitcast <vscale x 4 x i32>* %base to i32*
+%res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> *%addr) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-24
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 4 x float>* %base to float*
+%res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld3d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 21
+%base_ptr = bitcast <vscale x 2 x i64>* %base to i64*
+%res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-24
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 2 x double>* %base to double*
+%res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld4b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #4
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-32
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #28
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_01:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #5
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_02:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #6
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_03(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_03:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #7
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    mov x9, #-576
+; CHECK-NEXT:    lsr x8, x8, #4
+; CHECK-NEXT:    mul x8, x8, x9
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #4) #9)
+; xM = -9 * 2^6
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    mov w9, #512
+; CHECK-NEXT:    lsr x8, x8, #4
+; CHECK-NEXT:    mul x8, x8, x9
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #16) #2)
+; xM = 2^9
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld4h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #8
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
+%base_ptr = bitcast <vscale x 8 x i16>* %base to i16*
+%res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+; CHECK-LABEL: ld4.nxv32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #28
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 28
+%base_ptr = bitcast <vscale x 8 x half>* %base to half*
+%res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+; CHECK-LABEL: ld4.nxv32bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-32
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat*
+%res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld4w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #28
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 28
+%base_ptr = bitcast <vscale x 4 x i32>* %base to i32*
+%res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> * %addr) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-32
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 4 x float>* %base to float*
+%res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld4d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #28
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 28
+%base_ptr = bitcast <vscale x 2 x i64>* %base to i64*
+%res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-32
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 2 x double>* %base to double*
+%res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double * %base_ptr)
+ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
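+
+; NOTE (editorial): the bounds exercised above follow the ld<N> reg+imm
+; encoding, which accepts a multiple of <N> in [-8*<N>, 7*<N>] vector
+; lengths: [-16, 14] for ld2, [-24, 21] for ld3 and [-32, 28] for ld4.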
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
@@ -0,0 +1,284 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; ld2b
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i8, i8 * %addr, i64 %a
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld2h
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i16, i16 * %addr, i64 %a
+%res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr half, half * %addr, i64 %a
+%res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld2.nxv16bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr bfloat, bfloat * %addr, i64 %a
+%res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld2w
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i32, i32 * %addr, i64 %a
+%res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr float, float * %addr, i64 %a
+%res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld2d
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i64, i64 * %addr, i64 %a
+%res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr double, double * %addr, i64 %a
+%res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld3b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i8, i8 * %addr, i64 %a
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld3h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i16, i16 * %addr, i64 %a
+%res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv24f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr half, half * %addr, i64 %a
+%res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld3.nxv24bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr bfloat, bfloat * %addr, i64 %a
+%res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld3w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i32, i32 * %addr, i64 %a
+%res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr float, float * %addr, i64 %a
+%res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld3d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i64, i64 * %addr, i64 %a
+%res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr double, double * %addr, i64 %a
+%res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld4b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i8, i8 * %addr, i64 %a
+%res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld4h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i16, i16 * %addr, i64 %a
+%res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr half, half * %addr, i64 %a
+%res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld4.nxv32bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr bfloat, bfloat * %addr, i64 %a
+%res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld4w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i32, i32 * %addr, i64 %a
+%res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr float, float * %addr, i64 %a
+%res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld4d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr i64, i64 * %addr, i64 %a
+%res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+%addr2 = getelementptr double, double * %addr, i64 %a
+%res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
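+
+; NOTE (editorial): illustrative lowering sketch, not part of the patch.
+; With the IsIntr operand-offset handling added to SelectPredicatedLoad,
+; a scaled index feeding one of the new sret intrinsics selects the
+; reg+reg addressing mode directly, mirroring the ld2.nxv8f32 test above
+; (%pg, %base and %idx are placeholder values):
+;
+;   %p = getelementptr float, float* %base, i64 %idx
+;   %s = call { <vscale x 4 x float>, <vscale x 4 x float> }
+;        @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %pg, float* %p)
+;
+; selects: ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]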