diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td --- a/llvm/include/llvm/IR/IntrinsicsAArch64.td +++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -1277,7 +1277,7 @@ [IntrWriteMem, IntrArgMemOnly]>; -class SVE_gather_prf_scalar_base_vector_offset_scaled +class SVE_gather_prf_SV : Intrinsic<[], [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate @@ -1287,7 +1287,7 @@ ], [IntrInaccessibleMemOrArgMemOnly, NoCapture<1>, ImmArg<3>]>; -class SVE_gather_prf_vector_base_scalar_offset +class SVE_gather_prf_VS : Intrinsic<[], [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate @@ -1328,29 +1328,29 @@ // Scalar + 32-bit scaled offset vector, zero extend, packed and // unpacked. -def int_aarch64_sve_prfb_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfh_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfw_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfd_gather_scaled_uxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; +def int_aarch64_sve_prfb_gather_uxtw_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfh_gather_uxtw_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfw_gather_uxtw_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfd_gather_uxtw_index : SVE_gather_prf_SV; // Scalar + 32-bit scaled offset vector, sign extend, packed and // unpacked. -def int_aarch64_sve_prfb_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfw_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfh_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfd_gather_scaled_sxtw : SVE_gather_prf_scalar_base_vector_offset_scaled; +def int_aarch64_sve_prfb_gather_sxtw_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfw_gather_sxtw_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfh_gather_sxtw_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfd_gather_sxtw_index : SVE_gather_prf_SV; // Scalar + 64-bit scaled offset vector. -def int_aarch64_sve_prfb_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfh_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfw_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled; -def int_aarch64_sve_prfd_gather_scaled : SVE_gather_prf_scalar_base_vector_offset_scaled; +def int_aarch64_sve_prfb_gather_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfh_gather_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfw_gather_index : SVE_gather_prf_SV; +def int_aarch64_sve_prfd_gather_index : SVE_gather_prf_SV; // Vector + scalar. 
-def int_aarch64_sve_prfb_gather : SVE_gather_prf_vector_base_scalar_offset; -def int_aarch64_sve_prfh_gather : SVE_gather_prf_vector_base_scalar_offset; -def int_aarch64_sve_prfw_gather : SVE_gather_prf_vector_base_scalar_offset; -def int_aarch64_sve_prfd_gather : SVE_gather_prf_vector_base_scalar_offset; +def int_aarch64_sve_prfb_gather_scalar_offset : SVE_gather_prf_VS; +def int_aarch64_sve_prfh_gather_scalar_offset : SVE_gather_prf_VS; +def int_aarch64_sve_prfw_gather_scalar_offset : SVE_gather_prf_VS; +def int_aarch64_sve_prfd_gather_scalar_offset : SVE_gather_prf_VS; // // Scalar to vector operations diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -13019,9 +13019,9 @@ return DAG.getNode(N->getOpcode(), DL, DAG.getVTList(MVT::Other), Ops); } -/// Combines a node carrying the intrinsic `aarch64_sve_prf_gather` into a -/// node that uses `aarch64_sve_prf_gather_scaled_uxtw` when the scalar -/// offset passed to `aarch64_sve_prf_gather` is not a valid immediate for +/// Combines a node carrying the intrinsic `aarch64_sve_prf_gather` into a +/// node that uses `aarch64_sve_prf_gather_scaled_uxtw` when the scalar +/// offset passed to `aarch64_sve_prf_gather` is not a valid immediate for /// the sve gather prefetch instruction with vector plus immediate addressing /// mode. static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG, @@ -13107,30 +13107,30 @@ case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: switch (cast(N->getOperand(1))->getZExtValue()) { - case Intrinsic::aarch64_sve_prfb_gather: + case Intrinsic::aarch64_sve_prfb_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff( - N, DAG, Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw, + N, DAG, Intrinsic::aarch64_sve_prfb_gather_uxtw_index, 1 /*=ScalarSizeInBytes*/); - case Intrinsic::aarch64_sve_prfh_gather: + case Intrinsic::aarch64_sve_prfh_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff( - N, DAG, Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw, + N, DAG, Intrinsic::aarch64_sve_prfh_gather_uxtw_index, 2 /*=ScalarSizeInBytes*/); - case Intrinsic::aarch64_sve_prfw_gather: + case Intrinsic::aarch64_sve_prfw_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff( - N, DAG, Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw, + N, DAG, Intrinsic::aarch64_sve_prfw_gather_uxtw_index, 4 /*=ScalarSizeInBytes*/); - case Intrinsic::aarch64_sve_prfd_gather: + case Intrinsic::aarch64_sve_prfd_gather_scalar_offset: return combineSVEPrefetchVecBaseImmOff( - N, DAG, Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw, + N, DAG, Intrinsic::aarch64_sve_prfd_gather_uxtw_index, 8 /*=ScalarSizeInBytes*/); - case Intrinsic::aarch64_sve_prfb_gather_scaled_uxtw: - case Intrinsic::aarch64_sve_prfb_gather_scaled_sxtw: - case Intrinsic::aarch64_sve_prfh_gather_scaled_uxtw: - case Intrinsic::aarch64_sve_prfh_gather_scaled_sxtw: - case Intrinsic::aarch64_sve_prfw_gather_scaled_uxtw: - case Intrinsic::aarch64_sve_prfw_gather_scaled_sxtw: - case Intrinsic::aarch64_sve_prfd_gather_scaled_uxtw: - case Intrinsic::aarch64_sve_prfd_gather_scaled_sxtw: + case Intrinsic::aarch64_sve_prfb_gather_uxtw_index: + case Intrinsic::aarch64_sve_prfb_gather_sxtw_index: + case Intrinsic::aarch64_sve_prfh_gather_uxtw_index: + case Intrinsic::aarch64_sve_prfh_gather_sxtw_index: + case Intrinsic::aarch64_sve_prfw_gather_uxtw_index: + case 
Intrinsic::aarch64_sve_prfw_gather_sxtw_index: + case Intrinsic::aarch64_sve_prfd_gather_uxtw_index: + case Intrinsic::aarch64_sve_prfd_gather_sxtw_index: return legalizeSVEGatherPrefetchOffsVec(N, DAG); case Intrinsic::aarch64_neon_ld2: case Intrinsic::aarch64_neon_ld3: diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -880,37 +880,37 @@ // Gather prefetch using scaled 32-bit offsets, e.g. // prfh pldl1keep, p0, [x0, z0.s, uxtw #1] - defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>; - defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16, ZPR32ExtUXTW16, int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>; - defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32, ZPR32ExtUXTW32, int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>; - defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64, ZPR32ExtUXTW64, int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>; + defm PRFB_S : sve_mem_32b_prfm_sv_scaled<0b00, "prfb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, int_aarch64_sve_prfb_gather_sxtw_index, int_aarch64_sve_prfb_gather_uxtw_index>; + defm PRFH_S : sve_mem_32b_prfm_sv_scaled<0b01, "prfh", ZPR32ExtSXTW16, ZPR32ExtUXTW16, int_aarch64_sve_prfh_gather_sxtw_index, int_aarch64_sve_prfh_gather_uxtw_index>; + defm PRFW_S : sve_mem_32b_prfm_sv_scaled<0b10, "prfw", ZPR32ExtSXTW32, ZPR32ExtUXTW32, int_aarch64_sve_prfw_gather_sxtw_index, int_aarch64_sve_prfw_gather_uxtw_index>; + defm PRFD_S : sve_mem_32b_prfm_sv_scaled<0b11, "prfd", ZPR32ExtSXTW64, ZPR32ExtUXTW64, int_aarch64_sve_prfd_gather_sxtw_index, int_aarch64_sve_prfd_gather_uxtw_index>; // Gather prefetch using unpacked, scaled 32-bit offsets, e.g. 
// prfh pldl1keep, p0, [x0, z0.d, uxtw #1] - defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_prfb_gather_scaled_sxtw, int_aarch64_sve_prfb_gather_scaled_uxtw>; - defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16, ZPR64ExtUXTW16, int_aarch64_sve_prfh_gather_scaled_sxtw, int_aarch64_sve_prfh_gather_scaled_uxtw>; - defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32, ZPR64ExtUXTW32, int_aarch64_sve_prfw_gather_scaled_sxtw, int_aarch64_sve_prfw_gather_scaled_uxtw>; - defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64, ZPR64ExtUXTW64, int_aarch64_sve_prfd_gather_scaled_sxtw, int_aarch64_sve_prfd_gather_scaled_uxtw>; + defm PRFB_D : sve_mem_64b_prfm_sv_ext_scaled<0b00, "prfb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, int_aarch64_sve_prfb_gather_sxtw_index, int_aarch64_sve_prfb_gather_uxtw_index>; + defm PRFH_D : sve_mem_64b_prfm_sv_ext_scaled<0b01, "prfh", ZPR64ExtSXTW16, ZPR64ExtUXTW16, int_aarch64_sve_prfh_gather_sxtw_index, int_aarch64_sve_prfh_gather_uxtw_index>; + defm PRFW_D : sve_mem_64b_prfm_sv_ext_scaled<0b10, "prfw", ZPR64ExtSXTW32, ZPR64ExtUXTW32, int_aarch64_sve_prfw_gather_sxtw_index, int_aarch64_sve_prfw_gather_uxtw_index>; + defm PRFD_D : sve_mem_64b_prfm_sv_ext_scaled<0b11, "prfd", ZPR64ExtSXTW64, ZPR64ExtUXTW64, int_aarch64_sve_prfd_gather_sxtw_index, int_aarch64_sve_prfd_gather_uxtw_index>; // Gather prefetch using scaled 64-bit offsets, e.g. // prfh pldl1keep, p0, [x0, z0.d, lsl #1] - defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8, int_aarch64_sve_prfb_gather_scaled>; - defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_prfh_gather_scaled>; - defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_prfw_gather_scaled>; - defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_prfd_gather_scaled>; + defm PRFB_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b00, "prfb", ZPR64ExtLSL8, int_aarch64_sve_prfb_gather_index>; + defm PRFH_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b01, "prfh", ZPR64ExtLSL16, int_aarch64_sve_prfh_gather_index>; + defm PRFW_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b10, "prfw", ZPR64ExtLSL32, int_aarch64_sve_prfw_gather_index>; + defm PRFD_D_SCALED : sve_mem_64b_prfm_sv_lsl_scaled<0b11, "prfd", ZPR64ExtLSL64, int_aarch64_sve_prfd_gather_index>; // Gather prefetch using 32/64-bit pointers with offset, e.g. 
// prfh pldl1keep, p0, [z0.s, #16] // prfh pldl1keep, p0, [z0.d, #16] - defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>; - defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>; - defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>; - defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>; - - defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather>; - defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather>; - defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather>; - defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather>; + defm PRFB_S_PZI : sve_mem_32b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather_scalar_offset>; + defm PRFH_S_PZI : sve_mem_32b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather_scalar_offset>; + defm PRFW_S_PZI : sve_mem_32b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather_scalar_offset>; + defm PRFD_S_PZI : sve_mem_32b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather_scalar_offset>; + + defm PRFB_D_PZI : sve_mem_64b_prfm_vi<0b00, "prfb", imm0_31, int_aarch64_sve_prfb_gather_scalar_offset>; + defm PRFH_D_PZI : sve_mem_64b_prfm_vi<0b01, "prfh", uimm5s2, int_aarch64_sve_prfh_gather_scalar_offset>; + defm PRFW_D_PZI : sve_mem_64b_prfm_vi<0b10, "prfw", uimm5s4, int_aarch64_sve_prfw_gather_scalar_offset>; + defm PRFD_D_PZI : sve_mem_64b_prfm_vi<0b11, "prfd", uimm5s8, int_aarch64_sve_prfd_gather_scalar_offset>; defm ADR_SXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_sxtw<0b00, "adr">; defm ADR_UXTW_ZZZ_D : sve_int_bin_cons_misc_0_a_uxtw<0b01, "adr">; diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll @@ -0,0 +1,203 @@ +; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s + +; PRFB , , [, .S, ] -> 32-bit indexes +define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32: +; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32: +; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, sxtw] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +; PRFB , , [, .D, ] -> 32-bit unpacked indexes + +define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64: +; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: 
llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64: +; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, sxtw] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } +; PRFB , , [, .D] -> 64-bit indexes +define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64: +; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfb.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; PRFH , , [, .S, ] -> 32-bit indexes +define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32: +; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32: +; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, sxtw #1] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +; PRFH , , [, .D, #1] -> 32-bit unpacked indexes +define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64: +; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64: +; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, sxtw #1] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +; PRFH , , [, .D] -> 64-bit indexes +define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64: +; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, lsl #1] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfh.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; PRFW , , [, .S, ] -> 32-bit indexes +define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32: +; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32: +; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, sxtw #2] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +; PRFW , , [, .D, #2] -> 32-bit unpacked indexes +define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64( %Pg, i8* %base, 
%indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64: +; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64: +; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, sxtw #2] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +; PRFW , , [, .D] -> 64-bit indexes +define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64: +; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, lsl #2] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfw.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; PRFD , , [, .S, ] -> 32-bit indexes +define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32: +; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32: +; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, sxtw #3] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +; PRFD , , [, .D, #3] -> 32-bit unpacked indexes +define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64: +; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64: +; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, sxtw #3] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +; PRFD , , [, .D] -> 64-bit indexes +define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64( %Pg, i8* %base, %indexes) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64: +; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, lsl #3] +; CHECK-NEXT: ret + call void @llvm.aarch64.sve.prfd.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 1) + ret void + } + +declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) + +declare void 
@llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfh.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) + +declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) + +declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.index.nx2vi64( %Pg, i8* %base, %indexes, i32 %prfop) diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll deleted file mode 100644 --- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scaled-offset.ll +++ /dev/null @@ -1,200 +0,0 @@ -; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s - -; PRFB , , [, .S, ] -> 32-bit scaled offset -define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32: -; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32: -; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, sxtw] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -; PRFB , , [, .D, ] -> 32-bit unpacked scaled offset - -define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64: -; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64: -; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, sxtw] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } -; PRFB , , [, .D] -> 64-bit scaled offset -define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: 
llvm_aarch64_sve_prfb_gather_scaled_nx2vi64: -; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -; PRFH , , [, .S, ] -> 32-bit scaled offset -define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32: -; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32: -; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, sxtw #1] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -; PRFH , , [, .D, #1] -> 32-bit unpacked scaled offset -define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64: -; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64: -; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, sxtw #1] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -; PRFH , , [, .D] -> 64-bit scaled offset -define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64: -; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, lsl #1] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -; PRFW , , [, .S, ] -> 32-bit scaled offset -define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32: -; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32: -; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, sxtw #2] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -; PRFW , , [, .D, #2] -> 32-bit unpacked scaled offset -define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64: -; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: 
llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64: -; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, sxtw #2] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -; PRFW , , [, .D] -> 64-bit scaled offset -define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64: -; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, lsl #2] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -; PRFD , , [, .S, ] -> 32-bit scaled offset -define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32: -; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32: -; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, sxtw #3] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 1) - ret void - } - -; PRFD , , [, .D, #3] -> 32-bit unpacked scaled offset -define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64: -; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64: -; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, sxtw #3] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -; PRFD , , [, .D] -> 64-bit scaled offset -define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64( %Pg, i8* %base, %offset) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64: -; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, lsl #3] -; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 1) - ret void - } - -declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void 
@llvm.aarch64.sve.prfh.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) -declare void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64( %Pg, i8* %base, %offset, i32 %prfop) diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll --- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-imm-offset.ll @@ -1,82 +1,82 @@ ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s ; PRFB , , [.S{, #}] -> 32-bit element -define void @llvm_aarch64_sve_prfb_gather_nx4vi32( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32: +define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32: ; CHECK-NEXT: prfb pldl1strm, p0, [z0.s, #7] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx4vi32( %Pg, %bases, i64 7, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 7, i32 1) ret void } ; PRFB , , [.D{, #}] -> 64-bit element -define void @llvm_aarch64_sve_prfb_gather_nx2vi64( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64: +define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64: ; CHECK-NEXT: prfb pldl1strm, p0, [z0.d, #7] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx2vi64( %Pg, %bases, i64 7, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 7, i32 1) ret void } ; PRFH , , [.S{, #}] -> 32-bit element -define void @llvm_aarch64_sve_prfh_gather_nx4vi32( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32: ; CHECK-NEXT: prfh pldl1strm, p0, [z0.s, #6] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx4vi32( %Pg, %bases, i64 6, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 6, i32 1) ret void } ; PRFH , , [.D{, #}] -> 64-bit element -define void @llvm_aarch64_sve_prfh_gather_nx2vi64( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64( %bases, %Pg) nounwind { +; CHECK-LABEL: 
llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64: ; CHECK-NEXT: prfh pldl1strm, p0, [z0.d, #6] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx2vi64( %Pg, %bases, i64 6, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 6, i32 1) ret void } ; PRFW , , [.S{, #}] -> 32-bit element -define void @llvm_aarch64_sve_prfw_gather_nx4vi32( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32: +define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32: ; CHECK-NEXT: prfw pldl1strm, p0, [z0.s, #12] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.nx4vi32( %Pg, %bases, i64 12, i32 1) + call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 12, i32 1) ret void } ; PRFW , , [.D{, #}] -> 64-bit element -define void @llvm_aarch64_sve_prfw_gather_nx2vi64( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64: +define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64: ; CHECK-NEXT: prfw pldl1strm, p0, [z0.d, #12] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.nx2vi64( %Pg, %bases, i64 12, i32 1) + call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 12, i32 1) ret void } ; PRFD , , [.S{, #}] -> 32-bit element -define void @llvm_aarch64_sve_prfd_gather_nx4vi32( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32: +define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32: ; CHECK-NEXT: prfd pldl1strm, p0, [z0.s, #16] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfd.gather.nx4vi32( %Pg, %bases, i64 16, i32 1) + call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 16, i32 1) ret void } ; PRFD , , [.D{, #}] -> 64-bit element -define void @llvm_aarch64_sve_prfd_gather_nx2vi64( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64: +define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64: ; CHECK-NEXT: prfd pldl1strm, p0, [z0.d, #16] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfd.gather.nx2vi64( %Pg, %bases, i64 16, i32 1) + call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 16, i32 1) ret void } -declare void @llvm.aarch64.sve.prfb.gather.nx4vi32( %Pg, %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.prfb.gather.nx2vi64( %Pg, %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.prfh.gather.nx4vi32( %Pg, %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.prfh.gather.nx2vi64( %Pg, %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.prfw.gather.nx4vi32( %Pg, %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.prfw.gather.nx2vi64( %Pg, %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.prfd.gather.nx4vi32( %Pg, %bases, i64 %imm, i32 %prfop) -declare void @llvm.aarch64.sve.prfd.gather.nx2vi64( %Pg, %bases, i64 %imm, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 %offset, i32 %prfop) +declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 %offset, i32 %prfop) +declare void 
@llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 %offset, i32 %prfop) +declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 %offset, i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 %offset, i32 %prfop) +declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 %offset, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 %offset, i32 %prfop) +declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 %offset, i32 %prfop) diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll --- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll @@ -1,286 +1,286 @@ ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s ; PRFB , , [.S{, #}] -> 32-bit element, imm = 0, 1, ..., 31 -define void @llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset( %bases, i64 %imm, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_runtime_offset: +define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_runtime_offset( %bases, i64 %offset, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_runtime_offset: ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx4vi32( %Pg, %bases, i64 %imm, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 %offset, i32 1) ret void } -define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_upper_bound: +define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound: ; CHECK-NEXT: mov w[[N:[0-9]+]], #32 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.s, uxtw] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx4vi32( %Pg, %bases, i64 32, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 32, i32 1) ret void } -define void @llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx4vi32_invalid_immediate_offset_lower_bound: +define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound: ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx4vi32( %Pg, %bases, i64 -1, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 -1, i32 1) ret void } ; PRFB , , [.D{, #}] -> 64-bit element, imm = 0, 1, ..., 31 -define void @llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset( %bases, i64 %imm, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_runtime_offset: +define void 
@llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_runtime_offset( %bases, i64 %offset, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_runtime_offset: ; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx2vi64( %Pg, %bases, i64 %imm, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 %offset, i32 1) ret void } -define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_upper_bound: +define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound: ; CHECK-NEXT: mov w[[N:[0-9]+]], #32 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N]], z0.d, uxtw] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx2vi64( %Pg, %bases, i64 32, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 32, i32 1) ret void } -define void @llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_nx2vi64_invalid_immediate_offset_lower_bound: +define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound: ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: prfb pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfb.gather.nx2vi64( %Pg, %bases, i64 -1, i32 1) + call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 -1, i32 1) ret void } ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; PRFH , , [.S{, #}] -> 32-bit element, imm = 0, 2, ..., 62 -define void @llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset( %bases, i64 %imm, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_runtime_offset: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_runtime_offset( %bases, i64 %offset, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_runtime_offset: ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx4vi32( %Pg, %bases, i64 %imm, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 %offset, i32 1) ret void } -define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_upper_bound: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound: ; CHECK-NEXT: mov w[[N:[0-9]+]], #63 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N]], z0.s, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx4vi32( %Pg, %bases, i64 63, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 63, i32 1) ret void } -define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: 
llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_lower_bound: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound: ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx4vi32( %Pg, %bases, i64 -1, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 -1, i32 1) ret void } -define void @llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2: ; CHECK-NEXT: mov w[[N:[0-9]+]], #33 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx4vi32( %Pg, %bases, i64 33, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 33, i32 1) ret void } ; PRFH , , [.D{, #}] -> 64-bit element, imm = 0, 2, ..., 62 -define void @llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset( %bases, i64 %imm, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_runtime_offset: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_runtime_offset( %bases, i64 %offset, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_runtime_offset: ; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx2vi64( %Pg, %bases, i64 %imm, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 %offset, i32 1) ret void } -define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_upper_bound: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound: ; CHECK-NEXT: mov w[[N:[0-9]+]], #63 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N]], z0.d, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx2vi64( %Pg, %bases, i64 63, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 63, i32 1) ret void } -define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_lower_bound: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound: ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx2vi64( %Pg, %bases, i64 -1, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 -1, i32 
1) ret void } -define void @llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2: +define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2: ; CHECK-NEXT: mov w[[N:[0-9]+]], #33 ; CHECK-NEXT: prfh pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #1] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfh.gather.nx2vi64( %Pg, %bases, i64 33, i32 1) + call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64( %Pg, %bases, i64 33, i32 1) ret void } ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; PRFW , , [.S{, #}] -> 32-bit element, imm = 0, 4, ..., 124 -define void @llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset( %bases, i64 %imm, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_runtime_offset: +define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_runtime_offset( %bases, i64 %offset, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_runtime_offset: ; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.nx4vi32( %Pg, %bases, i64 %imm, i32 1) + call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 %offset, i32 1) ret void } -define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_upper_bound: +define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound: ; CHECK-NEXT: mov w[[N:[0-9]+]], #125 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N]], z0.s, uxtw #2] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.nx4vi32( %Pg, %bases, i64 125, i32 1) + call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 125, i32 1) ret void } -define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_lower_bound: +define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound: ; CHECK-NEXT: mov x[[N:[0-9]+]], #-1 ; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2] ; CHECK-NEXT: ret - call void @llvm.aarch64.sve.prfw.gather.nx4vi32( %Pg, %bases, i64 -1, i32 1) + call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32( %Pg, %bases, i64 -1, i32 1) ret void } -define void @llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4( %bases, %Pg) nounwind { -; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4: +define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4( %bases, %Pg) nounwind { +; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4: ; 
CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #2]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 4, ..., 124
-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %offset, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_runtime_offset:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #125
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N]], z0.d, uxtw #2]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
+define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfw pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #2]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
  ret void
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFD <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element, imm = 0, 8, ..., 248
-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %imm, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_runtime_offset:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_runtime_offset(<vscale x 4 x i32> %bases, i64 %offset, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_runtime_offset:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #125
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N]], z0.s, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.s, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element, imm = 0, 8, ..., 248
-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %imm, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_runtime_offset:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_runtime_offset(<vscale x 2 x i64> %bases, i64 %offset, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_runtime_offset:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_upper_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
; CHECK-NEXT: mov w[[N:[0-9]+]], #125
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N]], z0.d, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_lower_bound:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
; CHECK-NEXT: mov x[[N:[0-9]+]], #-1
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
  ret void
}

-define void @llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
-; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8:
+define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
+; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8:
; CHECK-NEXT: mov w[[N:[0-9]+]], #33
; CHECK-NEXT: prfd pldl1strm, p0, [x[[N:[0-9]+]], z0.d, uxtw #3]
; CHECK-NEXT: ret
-  call void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
  ret void
}

-declare void @llvm.aarch64.sve.prfb.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %imm, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %imm, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 %offset, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 %offset, i32 %prfop)
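
For reference, a minimal standalone module exercising the renamed vector-base plus scalar-offset intrinsic might look like the sketch below. This is illustrative only and not part of the patch; the function name @prefetch_words and the choice of offset 8 are assumptions. Since 8 is a multiple of 4 inside the 0..124 range valid for 32-bit elements, the expectation is that the offset can stay as an immediate in the vector-plus-immediate addressing mode, whereas the out-of-range and misaligned offsets in the tests above fall back to the scalar-base uxtw-index form.

; Illustrative sketch (assumed, not part of the patch): prefetch 32-bit
; elements at %bases + 8 bytes with prfop 1 (pldl1strm).
define void @prefetch_words(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %pg) nounwind {
  call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %bases, i64 8, i32 1)
  ret void
}

declare void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64, i32)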