diff --git a/clang/include/clang/Basic/BuiltinsVE.def b/clang/include/clang/Basic/BuiltinsVE.def
new file
--- /dev/null
+++ b/clang/include/clang/Basic/BuiltinsVE.def
@@ -0,0 +1,27 @@
+//===--- BuiltinsVE.def - VE Builtin function database ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the VE-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+BUILTIN(__builtin_ve_vl_svob, "v", "n")
+BUILTIN(__builtin_ve_vl_pack_f32p, "ULifC*fC*", "n")
+BUILTIN(__builtin_ve_vl_pack_f32a, "ULifC*", "n")
+
+BUILTIN(__builtin_ve_vl_extract_vm512u, "V4ULiV8ULi", "n")
+BUILTIN(__builtin_ve_vl_extract_vm512l, "V4ULiV8ULi", "n")
+BUILTIN(__builtin_ve_vl_insert_vm512u, "V8ULiV8ULiV4ULi", "n")
+BUILTIN(__builtin_ve_vl_insert_vm512l, "V8ULiV8ULiV4ULi", "n")
+
+#include "clang/Basic/BuiltinsVEVL.gen.def"
+
+#undef BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsVEVL.gen.def b/clang/include/clang/Basic/BuiltinsVEVL.gen.def
new file
--- /dev/null
+++ b/clang/include/clang/Basic/BuiltinsVEVL.gen.def
@@ -0,0 +1,1301 @@
+BUILTIN(__builtin_ve_vl_vld_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vld_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldu_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldu_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldunc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldunc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlsx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlsx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlsxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlsxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlzx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlzx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlzxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlzxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vld2d_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vld2d_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vld2dnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vld2dnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldu2d_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldu2d_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldu2dnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldu2dnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssvl, "V256dLUivC*V256dUi", "n") +BUILTIN(__builtin_ve_vl_vst_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstnc_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstncot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstu_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstunc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstunc_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstuot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstuot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstuncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstuncot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstl_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstlnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstlnc_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstlot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstlot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstlncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstlncot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vst2d_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2d_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vst2dnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2dnc_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vst2dot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2dot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vst2dncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2dncot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstu2d_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2d_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstu2dnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2dnc_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstu2dot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2dot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstu2dncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2dncot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstl2d_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2d_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstl2dnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2dnc_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstl2dot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2dot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vstl2dncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2dncot_vssml, "vV256dLUiv*V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pfchv_ssl, "vLivC*Ui", "n") +BUILTIN(__builtin_ve_vl_pfchvnc_ssl, "vLivC*Ui", "n") +BUILTIN(__builtin_ve_vl_lsv_vvss, "V256dV256dUiLUi", "n") +BUILTIN(__builtin_ve_vl_lvsl_svs, "LUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_lvsd_svs, "dV256dUi", "n") +BUILTIN(__builtin_ve_vl_lvss_svs, "fV256dUi", "n") +BUILTIN(__builtin_ve_vl_lvm_mmss, "V4ULiV4ULiLUiLUi", "n") +BUILTIN(__builtin_ve_vl_lvm_MMss, 
"V8ULiV8ULiLUiLUi", "n") +BUILTIN(__builtin_ve_vl_svm_sms, "LUiV4ULiLUi", "n") +BUILTIN(__builtin_ve_vl_svm_sMs, "LUiV8ULiLUi", "n") +BUILTIN(__builtin_ve_vl_vbrdd_vsl, "V256ddUi", "n") +BUILTIN(__builtin_ve_vl_vbrdd_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdd_vsmvl, "V256ddV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdl_vsl, "V256dLiUi", "n") +BUILTIN(__builtin_ve_vl_vbrdl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdl_vsmvl, "V256dLiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrds_vsl, "V256dfUi", "n") +BUILTIN(__builtin_ve_vl_vbrds_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrds_vsmvl, "V256dfV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdw_vsl, "V256diUi", "n") +BUILTIN(__builtin_ve_vl_vbrdw_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdw_vsmvl, "V256diV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvbrd_vsl, "V256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvbrd_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvbrd_vsMvl, "V256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmv_vsvl, "V256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmv_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmv_vsvmvl, "V256dUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vsvl, "V256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vsvmvl, "V256dUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vsvvl, 
"V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddsl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddsl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddsl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddsl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddsl_vsvmvl, "V256dLiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vsvl, "V256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vsvmvl, "V256dUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswzx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswzx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswzx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswzx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vsvmvl, "V256dLiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulul_vvvl, "V256dV256dV256dUi", 
"n") +BUILTIN(__builtin_ve_vl_vmulul_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulul_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulul_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulul_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vsvl, "V256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vsvmvl, "V256dUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vsvmvl, "V256dLiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulslw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulslw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulslw_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulslw_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vsvl, "V256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vsvmvl, "V256dUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvsmvl, "V256dV256dLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvsl, "V256dV256dUiUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvsvl, "V256dV256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvsmvl, "V256dV256dUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvvl, 
"V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvsl, "V256dV256diUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvsmvl, "V256dV256diV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvsl, "V256dV256diUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvsmvl, "V256dV256diV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vsvmvl, "V256dLiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vvsl, "V256dV256dLiUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vvsvl, "V256dV256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vvsmvl, "V256dV256dLiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vsvl, "V256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vsvmvl, "V256dUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswzx_vvvl, "V256dV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vcmpswzx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswzx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswzx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswzx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vsvmvl, "V256dLiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswzx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswzx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswzx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswzx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vsvmvl, "V256diV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmins_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmins_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmins_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmins_vsvvl, "V256dLUiV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvmins_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmins_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxsl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxsl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxsl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxsl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxsl_vsvmvl, "V256dLiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vsvmvl, "V256dLiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandlo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandlo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandlo_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandlo_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandlo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandlo_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandup_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandup_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandup_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvandup_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorlo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorlo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorlo_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorlo_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorlo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorlo_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorup_vvvvl, 
"V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorup_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorup_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvorup_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvor_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvor_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvor_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvor_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvor_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvor_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorlo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorlo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorlo_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorlo_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorlo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorlo_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorup_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorup_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorup_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxorup_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvlo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvlo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvlo_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvlo_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvlo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvlo_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvup_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvup_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvup_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqvup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pveqvup_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vseq_vl, "V256dUi", "n") +BUILTIN(__builtin_ve_vl_vseq_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvseqlo_vl, "V256dUi", "n") +BUILTIN(__builtin_ve_vl_pvseqlo_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsequp_vl, "V256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsequp_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvseq_vl, "V256dUi", "n") +BUILTIN(__builtin_ve_vl_pvseq_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvsmvl, "V256dV256dLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslllo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslllo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslllo_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvslllo_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslllo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslllo_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsllup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsllup_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsllup_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsllup_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsllup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsllup_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvsmvl, "V256dV256dLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrllo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrllo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrllo_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsrllo_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrllo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrllo_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrlup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrlup_vvvvl, "V256dV256dV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvsrlup_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsrlup_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrlup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrlup_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslaw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslaw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslaw_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vslaw_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslaw_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslaw_vvsmvl, "V256dV256dLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslalo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslalo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslalo_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvslalo_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslalo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslalo_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslaup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslaup_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslaup_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvslaup_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslaup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvslaup_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvsmvl, "V256dV256dLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsraw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsraw_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsraw_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsraw_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsraw_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsraw_vvsmvl, "V256dV256dLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsralo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsralo_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsralo_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsralo_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsralo_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsralo_vvsMvl, 
"V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsraup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsraup_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsraup_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsraup_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsraup_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsraup_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvsMvl, "V256dV256dLUiV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvsl, "V256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvsmvl, "V256dV256dLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsfa_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsfa_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsfa_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vsvmvl, "V256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vsvmvl, "V256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vsvmvl, "V256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vsvmvl, "V256dfV256dV4ULiV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfsub_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfsub_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfsub_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfsub_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfsub_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfsub_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vsvmvl, "V256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vsvmvl, "V256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vsvmvl, "V256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vsvmvl, "V256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsqrtd_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsqrtd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsqrts_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsqrts_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vsvmvl, "V256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vsvmvl, "V256dfV256dV4ULiV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfcmp_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfcmp_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfcmp_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfcmp_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfcmp_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfcmp_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vsvmvl, "V256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxs_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxs_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxs_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxs_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxs_vsvmvl, "V256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vsvmvl, "V256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vsvmvl, "V256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vsvMvl, "V256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvsvl, "V256dV256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vsvvmvl, "V256ddV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvsvmvl, "V256dV256ddV256dV4ULiV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vfmads_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vsvvvl, "V256dfV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vvsvl, "V256dV256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vsvvmvl, "V256dfV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vvsvmvl, "V256dV256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvvvMvl, "V256dV256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vsvvMvl, "V256dLUiV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvsvMvl, "V256dV256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvsvl, "V256dV256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vsvvmvl, "V256ddV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvsvmvl, "V256dV256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvsvl, "V256dV256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vsvvmvl, "V256dfV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvsvmvl, "V256dV256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvvvMvl, "V256dV256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vsvvMvl, "V256dLUiV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvsvMvl, "V256dV256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vsvvl, "V256ddV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vfnmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvsvl, "V256dV256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vsvvmvl, "V256ddV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvsvmvl, "V256dV256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vsvvvl, "V256dfV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvsvl, "V256dV256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vsvvmvl, "V256dfV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvsvmvl, "V256dV256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvvvMvl, "V256dV256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vsvvMvl, "V256dLUiV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvsvMvl, "V256dV256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvl, "V256dV256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvmvl, "V256ddV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvmvl, "V256dV256ddV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvl, "V256dV256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvmvl, "V256dV256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvmvl, "V256dfV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvmvl, "V256dV256dfV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvl, "V256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvvl, 
"V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvMvl, "V256dV256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvMvl, "V256dLUiV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvMvl, "V256dV256dLUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrcpd_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrcpd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrcps_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrcps_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvrcp_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvrcp_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrtd_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrtd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrts_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrts_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvrsqrt_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvrsqrt_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrtdnex_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrtdnex_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrtsnex_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrsqrtsnex_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsx_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzx_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssx_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszx_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtws_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtws_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtws_vvMvl, "V256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvMvl, "V256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtld_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtld_vvvl, "V256dV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vcvtld_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtldrz_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtldrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtldrz_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtdw_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtdw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtsw_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtsw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtsw_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtsw_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtdl_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtdl_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtds_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtds_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtsd_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtsd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vvvml, "V256dV256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vvvmvl, "V256dV256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vsvml, "V256dLUiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vsvmvl, "V256dLUiV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vvvMl, "V256dV256dV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vvvMvl, "V256dV256dV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vsvMl, "V256dUiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vsvMvl, "V256dUiV256dV8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vshf_vvvsl, "V256dV256dV256dLUiUi", "n") +BUILTIN(__builtin_ve_vl_vshf_vvvsvl, "V256dV256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcp_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vex_vvmvl, "V256dV256dV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklat_ml, "V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklaf_ml, "V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloat_ml, "V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupat_ml, "V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloaf_ml, "V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupaf_ml, "V4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkat_Ml, "V8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkaf_Ml, "V8ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklne_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklne_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklge_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklle_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklle_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklltnan_mvml, "V4ULiV256dV4ULiUi", "n") 
+BUILTIN(__builtin_ve_vl_vfmklnenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwne_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwne_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwge_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwle_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwle_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwltnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlone_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupne_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlone_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupne_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloge_mvml, "V4ULiV256dV4ULiUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfmkwupge_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlole_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuple_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlole_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuple_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgt_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgt_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlt_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlt_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwne_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwne_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweq_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweq_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwge_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwge_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwle_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwle_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnum_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnum_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgtnan_Mvl, 
"V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgtnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwltnan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwltnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnenan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnenan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweqnan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweqnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgenan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgenan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlenan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlenan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdne_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdne_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdge_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdle_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdle_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdltnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksne_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksne_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksge_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksle_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksle_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnan_mvml, "V4ULiV256dV4ULiUi", "n") 
+BUILTIN(__builtin_ve_vl_vfmksgtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksltnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplt_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplt_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslone_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupne_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslone_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupne_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeq_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeq_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupge_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloge_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupge_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslole_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuple_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslole_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuple_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnum_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnum_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvl, "V4ULiV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvl, "V4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvml, "V4ULiV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgt_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgt_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslt_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslt_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksne_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksne_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseq_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseq_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksge_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksge_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksle_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksle_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnum_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnum_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgtnan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgtnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksltnan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksltnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnenan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnenan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseqnan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseqnan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgenan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgenan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslenan_Mvl, "V8ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslenan_MvMl, "V8ULiV256dV8ULiUi", "n") +BUILTIN(__builtin_ve_vl_vsumwsx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsumwsx_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vsumwzx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsumwzx_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vsuml_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsuml_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfsumd_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsumd_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vfsums_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsums_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvl, 
"V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswfstzx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswfstzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswlstzx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxswlstzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswfstsx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswfstsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswlstsx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswlstsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswfstzx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswfstzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswlstzx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminswlstzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxslfst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxslfst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxsllst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrmaxsllst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminslfst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminslfst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminsllst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrminsllst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxdfst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxdfst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxdlst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxdlst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxsfst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxsfst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxslst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmaxslst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmindfst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmindfst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmindlst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrmindlst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrminsfst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrminsfst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrminslst_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfrminslst_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrand_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrand_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vror_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vror_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vrxor_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrxor_vvml, "V256dV256dV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgt_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgt_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgt_vvssml, "V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgt_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtnc_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgtnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtnc_vvssml, "V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgtnc_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtu_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgtu_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtu_vvssml, 
"V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgtu_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtunc_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgtunc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtunc_vvssml, "V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgtunc_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsx_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsx_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsx_vvssml, "V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsx_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssml, "V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzx_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzx_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzx_vvssml, "V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzx_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssl, "V256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssml, "V256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssmvl, "V256dV256dLUiLUiV4ULiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsc_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsc_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscnc_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscnc_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscot_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscncot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscncot_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscu_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscu_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscunc_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscunc_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscuot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscuot_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscuncot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscuncot_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vscl_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscl_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vsclnc_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsclnc_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vsclot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsclot_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_vsclncot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsclncot_vvssml, "vV256dV256dLUiLUiV4ULiUi", "n") +BUILTIN(__builtin_ve_vl_andm_mmm, "V4ULiV4ULiV4ULi", "n") +BUILTIN(__builtin_ve_vl_andm_MMM, "V8ULiV8ULiV8ULi", "n") +BUILTIN(__builtin_ve_vl_orm_mmm, "V4ULiV4ULiV4ULi", "n") +BUILTIN(__builtin_ve_vl_orm_MMM, "V8ULiV8ULiV8ULi", "n") +BUILTIN(__builtin_ve_vl_xorm_mmm, "V4ULiV4ULiV4ULi", "n") 
+BUILTIN(__builtin_ve_vl_xorm_MMM, "V8ULiV8ULiV8ULi", "n")
+BUILTIN(__builtin_ve_vl_eqvm_mmm, "V4ULiV4ULiV4ULi", "n")
+BUILTIN(__builtin_ve_vl_eqvm_MMM, "V8ULiV8ULiV8ULi", "n")
+BUILTIN(__builtin_ve_vl_nndm_mmm, "V4ULiV4ULiV4ULi", "n")
+BUILTIN(__builtin_ve_vl_nndm_MMM, "V8ULiV8ULiV8ULi", "n")
+BUILTIN(__builtin_ve_vl_negm_mm, "V4ULiV4ULi", "n")
+BUILTIN(__builtin_ve_vl_negm_MM, "V8ULiV8ULi", "n")
+BUILTIN(__builtin_ve_vl_pcvm_sml, "LUiV4ULiUi", "n")
+BUILTIN(__builtin_ve_vl_lzvm_sml, "LUiV4ULiUi", "n")
+BUILTIN(__builtin_ve_vl_tovm_sml, "LUiV4ULiUi", "n")
diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -106,6 +106,16 @@
   };
   }
 
+  /// VE builtins
+  namespace VE {
+  enum {
+    LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsVE.def"
+    LastTSBuiltin
+  };
+  }
+
   /// Flags to identify the types for overloaded Neon builtins.
   ///
   /// These must be kept in sync with the flags in utils/TableGen/NeonEmitter.h.
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -152,6 +152,8 @@
                            Group<m_Group>, Flags<[CoreOption]>, DocName<"X86">;
 def m_riscv_Features_Group : OptionGroup<"<m_riscv_Features_Group>">,
                              Group<m_Group>, DocName<"RISCV">;
+def m_ve_Features_Group : OptionGroup<"<m_ve_Features_Group>">,
+                          Group<m_Group>, DocName<"VE">;
 def m_libc_Group : OptionGroup<"<m libc group>">, Group<m_mips_Features_Group>,
                    Flags<[HelpHidden]>;
@@ -3162,6 +3164,13 @@
 def mvzeroupper : Flag<["-"], "mvzeroupper">, Group<m_x86_Features_Group>;
 def mno_vzeroupper : Flag<["-"], "mno-vzeroupper">, Group<m_x86_Features_Group>;
+def mvevec : Flag<["-"], "mvevec">, Group<m_ve_Features_Group>,
+  HelpText<"Enable vectorization for VE">,
+  Flags<[CC1Option]>;
+def mno_vevec : Flag<["-"], "mno-vevec">, Group<m_ve_Features_Group>,
+  HelpText<"Disable vectorization for VE">,
+  Flags<[CC1Option]>;
+
 // These are legacy user-facing driver-level option spellings. They are always
 // aliases for options that are spelled using the more common Unix / GNU flag
 // style of double-dash and equals-joined flags.
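For reference, the prototype strings used by the BUILTIN entries in BuiltinsVEVL.gen.def above follow the standard clang/Basic/Builtins.def type codes. A worked decoding of one of the mask builtins just above; the semantic gloss of the operands is an assumption based on the _sml/_vl naming convention, not something the .def file itself states:

// BUILTIN(__builtin_ve_vl_pcvm_sml, "LUiV4ULiUi", "n")
//   "LUi"   -> returns unsigned long int
//   "V4ULi" -> first argument: vector of 4 unsigned long int
//              (presumably a 256-bit mask register value)
//   "Ui"    -> second argument: unsigned int
//              (presumably the active vector length, per the trailing "l")
//   "n"     -> the builtin is marked nothrow
//
// So, roughly:
//   unsigned long count = __builtin_ve_vl_pcvm_sml(mask, vl);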
diff --git a/clang/include/clang/module.modulemap b/clang/include/clang/module.modulemap
--- a/clang/include/clang/module.modulemap
+++ b/clang/include/clang/module.modulemap
@@ -48,6 +48,7 @@
   textual header "Basic/BuiltinsX86.def"
   textual header "Basic/BuiltinsX86_64.def"
   textual header "Basic/BuiltinsXCore.def"
+  textual header "Basic/BuiltinsVE.def"
   textual header "Basic/CodeGenOptions.def"
   textual header "Basic/DiagnosticOptions.def"
   textual header "Basic/Features.def"
diff --git a/clang/lib/Basic/CMakeLists.txt b/clang/lib/Basic/CMakeLists.txt
--- a/clang/lib/Basic/CMakeLists.txt
+++ b/clang/lib/Basic/CMakeLists.txt
@@ -86,6 +86,7 @@
   Targets/WebAssembly.cpp
   Targets/X86.cpp
   Targets/XCore.cpp
+  Targets/VE.cpp
   TokenKinds.cpp
   Version.cpp
   Warnings.cpp
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -33,6 +33,7 @@
 #include "Targets/Sparc.h"
 #include "Targets/SystemZ.h"
 #include "Targets/TCE.h"
+#include "Targets/VE.h"
 #include "Targets/WebAssembly.h"
 #include "Targets/X86.h"
 #include "Targets/XCore.h"
@@ -608,6 +609,9 @@
     return new LinuxTargetInfo<RenderScript32TargetInfo>(Triple, Opts);
   case llvm::Triple::renderscript64:
     return new LinuxTargetInfo<RenderScript64TargetInfo>(Triple, Opts);
+
+  case llvm::Triple::ve:
+    return new LinuxTargetInfo<VETargetInfo>(Triple, Opts);
   }
 }
 } // namespace targets
diff --git a/clang/lib/Basic/Targets/VE.h b/clang/lib/Basic/Targets/VE.h
new file mode 100644
--- /dev/null
+++ b/clang/lib/Basic/Targets/VE.h
@@ -0,0 +1,156 @@
+//===--- VE.h - Declare VE target feature support ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares VE TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_VE_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY VETargetInfo : public TargetInfo {
+  static const Builtin::Info BuiltinInfo[];
+
+public:
+  VETargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+      : TargetInfo(Triple) {
+    NoAsmVariants = true;
+    LongDoubleWidth = 128;
+    LongDoubleAlign = 128;
+    LongDoubleFormat = &llvm::APFloat::IEEEquad();
+    DoubleAlign = LongLongAlign = 64;
+    SuitableAlign = 64;
+    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+    SizeType = UnsignedLong;
+    PtrDiffType = SignedLong;
+    IntPtrType = SignedLong;
+    IntMaxType = SignedLong;
+    Int64Type = SignedLong;
+    RegParmMax = 8;
+    MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+
+    WCharType = UnsignedInt;
+    WIntType = UnsignedInt;
+    UseZeroLengthBitfieldAlignment = true;
+    resetDataLayout("e-m:e-i64:64-n32:64-S64-v64:64:64-v128:64:64-v256:64:64-v512:64:64-v1024:64:64-v2048:64:64-v4096:64:64-v8192:64:64-v16384:64:64");
+  }
+
+  void getTargetDefines(const LangOptions &Opts,
+                        MacroBuilder &Builder) const override;
+
+  bool hasSjLjLowering() const override { return true; }
+
+  ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+  BuiltinVaListKind getBuiltinVaListKind() const override {
+    return TargetInfo::VoidPtrBuiltinVaList;
+  }
+
+  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+    switch (CC) {
+    default:
+      return CCCR_Warning;
+    case CC_C:
+    case CC_X86RegCall:
+      return CCCR_OK;
+    }
+  }
+
+  const char *getClobbers() const override { return ""; }
+
+  ArrayRef<const char *> getGCCRegNames() const override {
+    static const char *const GCCRegNames[] = {
+        // Regular registers
+        "sx0", "sx1", "sx2", "sx3", "sx4", "sx5", "sx6", "sx7",
+        "sx8", "sx9", "sx10", "sx11", "sx12", "sx13", "sx14", "sx15",
+        "sx16", "sx17", "sx18", "sx19", "sx20", "sx21", "sx22", "sx23",
+        "sx24", "sx25", "sx26", "sx27", "sx28", "sx29", "sx30", "sx31",
+        "sx32", "sx33", "sx34", "sx35", "sx36", "sx37", "sx38", "sx39",
+        "sx40", "sx41", "sx42", "sx43", "sx44", "sx45", "sx46", "sx47",
+        "sx48", "sx49", "sx50", "sx51", "sx52", "sx53", "sx54", "sx55",
+        "sx56", "sx57", "sx58", "sx59", "sx60", "sx61", "sx62", "sx63",
+        // Vector registers
+        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+        "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+        "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+        "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+        "v32", "v33", "v34", "v35", "v36", "v37", "v38", "v39",
+        "v40", "v41", "v42", "v43", "v44", "v45", "v46", "v47",
+        "v48", "v49", "v50", "v51", "v52", "v53", "v54", "v55",
+        "v56", "v57", "v58", "v59", "v60", "v61", "v62", "v63",
+        // Special registers
+        "vl", "vixr", "ucc", "psw", "sar", "pmmr",
+        "pmcr0", "pmcr1", "pmcr2", "pmcr3",
+        "pmc0", "pmc1", "pmc2", "pmc3", "pmc4", "pmc5", "pmc6", "pmc7",
+        "pmc8", "pmc9", "pmc10", "pmc11", "pmc12", "pmc13", "pmc14",
+    };
+    return llvm::makeArrayRef(GCCRegNames);
+  }
+
+  ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+    static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+        {{"s0"}, "sx0"},       {{"s1"}, "sx1"},
+        {{"s2"}, "sx2"},       {{"s3"}, "sx3"},
+        {{"s4"}, "sx4"},       {{"s5"}, "sx5"},
+        {{"s6"}, "sx6"},       {{"s7"}, "sx7"},
+        {{"s8", "sl"}, "sx8"}, {{"s9", "fp"}, "sx9"},
+        {{"s10", "lr"},
"sx10"}, {{"s11", "sp"}, "sx11"}, + {{"s12", "outer"}, "sx12"}, {{"s13"}, "sx13"}, + {{"s14", "tp"}, "sx14"}, {{"s15", "got"}, "sx15"}, + {{"s16", "plt"}, "sx16"}, {{"s17", "info"}, "sx17"}, + {{"s18"}, "sx18"}, {{"s19"}, "sx19"}, + {{"s20"}, "sx20"}, {{"s21"}, "sx21"}, + {{"s22"}, "sx22"}, {{"s23"}, "sx23"}, + {{"s24"}, "sx24"}, {{"s25"}, "sx25"}, + {{"s26"}, "sx26"}, {{"s27"}, "sx27"}, + {{"s28"}, "sx28"}, {{"s29"}, "sx29"}, + {{"s30"}, "sx30"}, {{"s31"}, "sx31"}, + {{"s32"}, "sx32"}, {{"s33"}, "sx33"}, + {{"s34"}, "sx34"}, {{"s35"}, "sx35"}, + {{"s36"}, "sx36"}, {{"s37"}, "sx37"}, + {{"s38"}, "sx38"}, {{"s39"}, "sx39"}, + {{"s40"}, "sx40"}, {{"s41"}, "sx41"}, + {{"s42"}, "sx42"}, {{"s43"}, "sx43"}, + {{"s44"}, "sx44"}, {{"s45"}, "sx45"}, + {{"s46"}, "sx46"}, {{"s47"}, "sx47"}, + {{"s48"}, "sx48"}, {{"s49"}, "sx49"}, + {{"s50"}, "sx50"}, {{"s51"}, "sx51"}, + {{"s52"}, "sx52"}, {{"s53"}, "sx53"}, + {{"s54"}, "sx54"}, {{"s55"}, "sx55"}, + {{"s56"}, "sx56"}, {{"s57"}, "sx57"}, + {{"s58"}, "sx58"}, {{"s59"}, "sx59"}, + {{"s60"}, "sx60"}, {{"s61"}, "sx61"}, + {{"s62"}, "sx62"}, {{"s63"}, "sx63"}, + {{"vix"}, "vixr"}, {{"usrcc"}, "ucc"}, + }; + return llvm::makeArrayRef(GCCRegAliases); + } + + bool validateAsmConstraint(const char *&Name, + TargetInfo::ConstraintInfo &Info) const override { + return false; + } + + int getEHDataRegisterNumber(unsigned RegNo) const override { + // S0 = ExceptionPointerRegister, S1 = ExceptionSelectorRegister + return (RegNo < 2) ? RegNo : -1; + } + + bool allowsLargerPreferedTypeAlignment() const override { return false; } +}; +} // namespace targets +} // namespace clang +#endif // LLVM_CLANG_LIB_BASIC_TARGETS_VE_H diff --git a/clang/lib/Basic/Targets/VE.cpp b/clang/lib/Basic/Targets/VE.cpp new file mode 100644 --- /dev/null +++ b/clang/lib/Basic/Targets/VE.cpp @@ -0,0 +1,48 @@ +//===--- VE.cpp - Implement VE target feature support ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements VE TargetInfo objects. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "VE.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+const Builtin::Info VETargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
+  {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+#include "clang/Basic/BuiltinsVE.def"
+};
+
+void VETargetInfo::getTargetDefines(const LangOptions &Opts,
+                                    MacroBuilder &Builder) const {
+  Builder.defineMacro("_LP64", "1");
+  Builder.defineMacro("unix", "1");
+  Builder.defineMacro("__unix__", "1");
+  Builder.defineMacro("__linux__", "1");
+  Builder.defineMacro("__ve", "1");
+  Builder.defineMacro("__ve__", "1");
+  Builder.defineMacro("__STDC_HOSTED__", "1");
+  Builder.defineMacro("__STDC__", "1");
+  Builder.defineMacro("__NEC__", "1");
+  // FIXME: define __FAST_MATH__ 1 if -ffast-math is enabled
+  // FIXME: define __OPTIMIZE__ n if -On is enabled
+  // FIXME: define __VECTOR__ n 1 if automatic vectorization is enabled
+}
+
+ArrayRef<Builtin::Info> VETargetInfo::getTargetBuiltins() const {
+  return llvm::makeArrayRef(BuiltinInfo, clang::VE::LastTSBuiltin -
+                                             Builtin::FirstTSBuiltin);
+}
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -9763,6 +9763,46 @@
 };
 } // namespace
 
+//===----------------------------------------------------------------------===//
+// VE ABI Implementation.
+// Copied from SPARC V8 ABI.
+//
+// Ensures that complex values are passed in registers.
+//
+namespace {
+class VEABIInfo : public DefaultABIInfo {
+public:
+  VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+  ABIArgInfo classifyReturnType(QualType RetTy) const;
+  void computeInfo(CGFunctionInfo &FI) const override;
+};
+} // end anonymous namespace
+
+ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
+  if (Ty->isAnyComplexType()) {
+    return ABIArgInfo::getDirect();
+  } else {
+    return DefaultABIInfo::classifyReturnType(Ty);
+  }
+}
+
+void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+  for (auto &Arg : FI.arguments())
+    Arg.info = classifyArgumentType(Arg.type);
+}
+
+namespace {
+class VETargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  VETargetCodeGenInfo(CodeGenTypes &CGT)
+      : TargetCodeGenInfo(new VEABIInfo(CGT)) {}
+};
+} // end anonymous namespace
+
 //===----------------------------------------------------------------------===//
 // Driver code
 //===----------------------------------------------------------------------===//
@@ -9950,6 +9990,8 @@
   case llvm::Triple::spir:
   case llvm::Triple::spir64:
     return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
+  case llvm::Triple::ve:
+    return SetCGInfo(new VETargetCodeGenInfo(Types));
   }
 }
diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt
--- a/clang/lib/Driver/CMakeLists.txt
+++ b/clang/lib/Driver/CMakeLists.txt
@@ -31,6 +31,7 @@
   ToolChains/Arch/Sparc.cpp
   ToolChains/Arch/SystemZ.cpp
   ToolChains/Arch/X86.cpp
+  ToolChains/Arch/VE.cpp
   ToolChains/AIX.cpp
   ToolChains/Ananas.cpp
   ToolChains/AMDGPU.cpp
@@ -70,6 +71,7 @@
   ToolChains/XCore.cpp
   ToolChains/PPCLinux.cpp
   ToolChains/InterfaceStubs.cpp
+  ToolChains/VE.cpp
   Types.cpp
   XRayArgs.cpp
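As a minimal sketch of what the VEABIInfo change above buys (illustrative C, not part of the patch): a _Complex return value is classified ABIArgInfo::getDirect() and therefore comes back in registers, rather than through a hidden sret pointer; all other types still fall through to DefaultABIInfo.

#include <complex.h>

// With VEABIInfo::classifyReturnType above, this function's _Complex double
// result is returned directly (getDirect), not via an sret argument.
double _Complex scale(double _Complex z, double k) {
  return z * k;
}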
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -45,6 +45,7 @@
 #include "ToolChains/TCE.h"
 #include "ToolChains/WebAssembly.h"
 #include "ToolChains/XCore.h"
+#include "ToolChains/VE.h"
 #include "clang/Basic/Version.h"
 #include "clang/Config/config.h"
 #include "clang/Driver/Action.h"
@@ -4823,17 +4824,19 @@
   case llvm::Triple::Linux:
   case llvm::Triple::ELFIAMCU:
     if (Target.getArch() == llvm::Triple::hexagon)
-      TC = std::make_unique<toolchains::HexagonToolChain>(*this, Target,
-                                                          Args);
+      TC =
+          std::make_unique<toolchains::HexagonToolChain>(*this, Target, Args);
+    else if (Target.getArch() == llvm::Triple::ve)
+      TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
     else if ((Target.getVendor() == llvm::Triple::MipsTechnologies) &&
              !Target.hasEnvironment())
       TC = std::make_unique<toolchains::MipsLLVMToolChain>(*this, Target,
-                                                            Args);
+                                                           Args);
     else if (Target.getArch() == llvm::Triple::ppc ||
              Target.getArch() == llvm::Triple::ppc64 ||
              Target.getArch() == llvm::Triple::ppc64le)
       TC = std::make_unique<toolchains::PPCLinuxToolChain>(*this, Target,
-                                                            Args);
+                                                           Args);
     else
       TC = std::make_unique<toolchains::Linux>(*this, Target, Args);
     break;
@@ -4924,6 +4927,9 @@
   case llvm::Triple::riscv64:
     TC = std::make_unique<toolchains::RISCVToolChain>(*this, Target, Args);
     break;
+  case llvm::Triple::ve:
+    TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
+    break;
   default:
     if (Target.getVendor() == llvm::Triple::Myriad)
       TC = std::make_unique<toolchains::MyriadToolChain>(*this, Target,
diff --git a/clang/lib/Driver/ToolChains/Arch/VE.h b/clang/lib/Driver/ToolChains/Arch/VE.h
new file mode 100644
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Arch/VE.h
@@ -0,0 +1,41 @@
+//===--- VE.h - VE-specific Tool Helpers ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace ve {
+
+enum class FloatABI {
+  Invalid,
+  Soft,
+  Hard,
+};
+
+FloatABI getVEFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+
+void getVETargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+                         std::vector<llvm::StringRef> &Features);
+const char *getVEAsmModeForCPU(llvm::StringRef Name,
+                               const llvm::Triple &Triple);
+
+} // end namespace ve
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_VE_H
diff --git a/clang/lib/Driver/ToolChains/Arch/VE.cpp b/clang/lib/Driver/ToolChains/Arch/VE.cpp
new file mode 100644
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Arch/VE.cpp
@@ -0,0 +1,80 @@
+//===--- VE.cpp - Tools Implementations -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "VE.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/ArgList.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+const char *ve::getVEAsmModeForCPU(StringRef Name,
+                                   const llvm::Triple &Triple) {
+  return llvm::StringSwitch<const char *>(Name)
+      .Default("");
+}
+
+ve::FloatABI ve::getVEFloatABI(const Driver &D,
+                               const ArgList &Args) {
+  ve::FloatABI ABI = ve::FloatABI::Invalid;
+  if (Arg *A = Args.getLastArg(clang::driver::options::OPT_msoft_float,
+                               options::OPT_mhard_float,
+                               options::OPT_mfloat_abi_EQ)) {
+    if (A->getOption().matches(clang::driver::options::OPT_msoft_float))
+      ABI = ve::FloatABI::Soft;
+    else if (A->getOption().matches(options::OPT_mhard_float))
+      ABI = ve::FloatABI::Hard;
+    else {
+      ABI = llvm::StringSwitch<ve::FloatABI>(A->getValue())
+                .Case("soft", ve::FloatABI::Soft)
+                .Case("hard", ve::FloatABI::Hard)
+                .Default(ve::FloatABI::Invalid);
+      if (ABI == ve::FloatABI::Invalid &&
+          !StringRef(A->getValue()).empty()) {
+        D.Diag(clang::diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
+        ABI = ve::FloatABI::Hard;
+      }
+    }
+  }
+
+  // If unspecified, choose the default based on the platform.
+  // Only the hard-float ABI on VE is standardized, and it is the
+  // default. GCC also supports a nonstandard soft-float ABI mode, also
+  // implemented in LLVM. However as this is not standard we set the default
+  // to be hard-float.
+  if (ABI == ve::FloatABI::Invalid) {
+    ABI = ve::FloatABI::Hard;
+  }
+
+  return ABI;
+}
+
+void ve::getVETargetFeatures(const Driver &D, const ArgList &Args,
+                             std::vector<StringRef> &Features) {
+  ve::FloatABI FloatABI = ve::getVEFloatABI(D, Args);
+  if (FloatABI == ve::FloatABI::Soft)
+    Features.push_back("+soft-float");
+
+  // -mno-vevec is default, unless -mvevec is specified.
+  bool VEVec = false;
+  if (auto *A = Args.getLastArg(options::OPT_mvevec, options::OPT_mno_vevec)) {
+    if (A->getOption().matches(options::OPT_mvevec)) {
+      VEVec = true;
+      Features.push_back("+vec");
+    }
+  }
+
+  if (!VEVec) {
+    Features.push_back("-vec");
+  }
+}
diff --git a/clang/lib/Driver/ToolChains/Clang.h b/clang/lib/Driver/ToolChains/Clang.h
--- a/clang/lib/Driver/ToolChains/Clang.h
+++ b/clang/lib/Driver/ToolChains/Clang.h
@@ -73,6 +73,8 @@
                                 llvm::opt::ArgStringList &CmdArgs) const;
   void AddWebAssemblyTargetArgs(const llvm::opt::ArgList &Args,
                                 llvm::opt::ArgStringList &CmdArgs) const;
+  void AddVETargetArgs(const llvm::opt::ArgList &Args,
+                       llvm::opt::ArgStringList &CmdArgs) const;
 
   enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile };
 
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -15,6 +15,7 @@
 #include "Arch/Sparc.h"
 #include "Arch/SystemZ.h"
 #include "Arch/X86.h"
+#include "Arch/VE.h"
 #include "AMDGPU.h"
 #include "CommonArgs.h"
 #include "Hexagon.h"
@@ -368,6 +369,9 @@
     break;
   case llvm::Triple::msp430:
     msp430::getMSP430TargetFeatures(D, Args, Features);
+    break;
+  case llvm::Triple::ve:
+    ve::getVETargetFeatures(D, Args, Features);
   }
 
   // Find the last of each feature.
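A rough sketch of how the driver logic above is expected to compose, inferred from getVEFloatABI, getVETargetFeatures, and the Clang.cpp hook; the triple spelling and file names here are illustrative, not mandated by the patch:

// clang --target=ve-linux-gnu -c a.c
//     -> float ABI defaults to Hard; target features get "-vec"
// clang --target=ve-linux-gnu -mvevec -c a.c
//     -> target features get "+vec"
// clang --target=ve-linux-gnu -msoft-float -c a.c
//     -> target features get "+soft-float" (and AddVETargetArgs below is
//        expected to pass "-msoft-float -mfloat-abi soft" on to cc1)
// clang --target=ve-linux-gnu -mfloat-abi=bogus -c a.c
//     -> err_drv_invalid_mfloat_abi is emitted, then the ABI falls back to Hard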
@@ -1675,6 +1679,10 @@ case llvm::Triple::wasm64: AddWebAssemblyTargetArgs(Args, CmdArgs); break; + + case llvm::Triple::ve: + AddVETargetArgs(Args, CmdArgs); + break; } } @@ -2112,6 +2120,24 @@ } } +void Clang::AddVETargetArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { + ve::FloatABI FloatABI = + ve::getVEFloatABI(getToolChain().getDriver(), Args); + + if (FloatABI == ve::FloatABI::Soft) { + // Floating point operations and argument passing are soft. + CmdArgs.push_back("-msoft-float"); + CmdArgs.push_back("-mfloat-abi"); + CmdArgs.push_back("soft"); + } else { + // Floating point operations and argument passing are hard. + assert(FloatABI == ve::FloatABI::Hard && "Invalid float abi!"); + CmdArgs.push_back("-mfloat-abi"); + CmdArgs.push_back("hard"); + } +} + void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename, StringRef Target, const InputInfo &Output, const InputInfo &Input, const ArgList &Args) const { @@ -5494,7 +5520,10 @@ // Enable vectorization per default according to the optimization level // selected. For optimization levels that want vectorization we use the alias // option to simplify the hasFlag logic. - bool EnableVec = shouldEnableVectorizerAtOLevel(Args, false); + // Disable vectorization by default for the case of VE temporary, + // until VE supports vector instructions. + const bool IsVE = TC.getTriple().isVE(); + bool EnableVec = IsVE ? false : shouldEnableVectorizerAtOLevel(Args, false); OptSpecifier VectorizeAliasOption = EnableVec ? options::OPT_O_Group : options::OPT_fvectorize; if (Args.hasFlag(options::OPT_fvectorize, VectorizeAliasOption, @@ -5502,7 +5531,7 @@ CmdArgs.push_back("-vectorize-loops"); // -fslp-vectorize is enabled based on the optimization level selected. - bool EnableSLPVec = shouldEnableVectorizerAtOLevel(Args, true); + bool EnableSLPVec = IsVE ? false : shouldEnableVectorizerAtOLevel(Args, true); OptSpecifier SLPVectAliasOption = EnableSLPVec ? options::OPT_O_Group : options::OPT_fslp_vectorize; if (Args.hasFlag(options::OPT_fslp_vectorize, SLPVectAliasOption, diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -489,8 +489,11 @@ void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs) { + // Enable -frtlib-add-rpath by default for the case of VE. 
+ const bool IsVE = TC.getTriple().isVE(); + bool DefaultValue = IsVE; if (!Args.hasFlag(options::OPT_frtlib_add_rpath, - options::OPT_fno_rtlib_add_rpath, false)) + options::OPT_fno_rtlib_add_rpath, DefaultValue)) return; std::string CandidateRPath = TC.getArchSpecificLibPath(); diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -304,6 +304,8 @@ if (T.getEnvironment() == llvm::Triple::GNUX32) return "elf32_x86_64"; return "elf_x86_64"; + case llvm::Triple::ve: + return "elf64ve"; default: return nullptr; } @@ -355,6 +357,8 @@ const llvm::Triple::ArchType Arch = ToolChain.getArch(); const bool isAndroid = ToolChain.getTriple().isAndroid(); const bool IsIAMCU = ToolChain.getTriple().isOSIAMCU(); + const bool IsVE = ToolChain.getTriple().isVE(); + const bool IsMusl = ToolChain.getTriple().isMusl(); const bool IsPIE = getPIE(Args, ToolChain); const bool IsStaticPIE = getStaticPIE(Args, ToolChain); const bool IsStatic = getStatic(Args); @@ -475,6 +479,11 @@ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o"))); } + if (IsVE) { + CmdArgs.push_back("-z"); + CmdArgs.push_back("max-page-size=0x4000000"); + } + if (IsIAMCU) CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); else if (HasCRTBeginEndFiles) { @@ -570,7 +579,7 @@ AddRunTimeLibs(ToolChain, D, CmdArgs, Args); - if (WantPthread && !isAndroid) + if (WantPthread && !isAndroid && !(IsVE && IsMusl)) CmdArgs.push_back("-lpthread"); if (Args.hasArg(options::OPT_fsplit_stack)) @@ -646,6 +655,7 @@ llvm::Reloc::Model RelocationModel; unsigned PICLevel; bool IsPIE; + const char *DefaultAssembler = "as"; std::tie(RelocationModel, PICLevel, IsPIE) = ParsePICArgs(getToolChain(), Args); @@ -866,6 +876,8 @@ CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName)); break; } + case llvm::Triple::ve: + DefaultAssembler = "nas"; } for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ, @@ -890,7 +902,8 @@ for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); - const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as")); + const char *Exec = + Args.MakeArgString(getToolChain().GetProgramPath(DefaultAssembler)); C.addCommand(std::make_unique(JA, *this, Exec, CmdArgs, Inputs)); // Handle the debug info splitting at object creation time if we're diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp --- a/clang/lib/Driver/ToolChains/Linux.cpp +++ b/clang/lib/Driver/ToolChains/Linux.cpp @@ -518,6 +518,7 @@ if (Triple.isMusl()) { std::string ArchName; + std::string Path = "/lib/"; bool IsArm = false; switch (Arch) { @@ -531,6 +532,10 @@ ArchName = "armeb"; IsArm = true; break; + case llvm::Triple::ve: + Path = "/opt/nec/ve/musl/lib/"; + ArchName = "ve"; + break; default: ArchName = Triple.getArchName().str(); } @@ -539,7 +544,7 @@ tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard)) ArchName += "hf"; - return "/lib/ld-musl-" + ArchName + ".so.1"; + return Path + "ld-musl-" + ArchName + ".so.1"; } std::string LibDir; @@ -638,6 +643,9 @@ Loader = X32 ? 
"ld-linux-x32.so.2" : "ld-linux-x86-64.so.2"; break; } + case llvm::Triple::ve: + return "/opt/nec/ve/lib/ld-linux-ve.so.1"; + break; } if (Distro == Distro::Exherbo && diff --git a/clang/lib/Driver/ToolChains/VE.h b/clang/lib/Driver/ToolChains/VE.h new file mode 100644 --- /dev/null +++ b/clang/lib/Driver/ToolChains/VE.h @@ -0,0 +1,66 @@ +//===--- VE.h - VE ToolChain Implementations --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H +#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H + +#include "Linux.h" +#include "clang/Driver/ToolChain.h" + +namespace clang { +namespace driver { +namespace toolchains { + +class LLVM_LIBRARY_VISIBILITY VEToolChain : public Linux { +public: + VEToolChain(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args); + +protected: + Tool *buildAssembler() const override; + Tool *buildLinker() const override; + +public: + bool isPICDefault() const override; + bool isPIEDefault() const override; + bool isPICDefaultForced() const override; + bool SupportsProfiling() const override; + bool hasBlocksRuntime() const override; + void + AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + void + addClangTargetOptions(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args, + Action::OffloadKind DeviceOffloadKind) const override; + void AddClangCXXStdlibIncludeArgs( + const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const override; + + llvm::ExceptionHandling + GetExceptionModel(const llvm::opt::ArgList &Args) const override; + + CXXStdlibType + GetCXXStdlibType(const llvm::opt::ArgList &Args) const override { + return ToolChain::CST_Libcxx; + } + + RuntimeLibType GetDefaultRuntimeLibType() const override { + return ToolChain::RLT_CompilerRT; + } + + const char *getDefaultLinker() const override { return "nld"; } +}; + +} // end namespace toolchains +} // end namespace driver +} // end namespace clang + +#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_VE_H diff --git a/clang/lib/Driver/ToolChains/VE.cpp b/clang/lib/Driver/ToolChains/VE.cpp new file mode 100644 --- /dev/null +++ b/clang/lib/Driver/ToolChains/VE.cpp @@ -0,0 +1,151 @@ +//===--- VE.cpp - VE ToolChain Implementations ------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "VE.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include <cstdlib> // ::getenv
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+/// VE tool chain
+VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
+                         const ArgList &Args)
+    : Linux(D, Triple, Args) {
+  getProgramPaths().push_back("/opt/nec/ve/bin");
+  // ProgramPaths are found via 'PATH' environment variable.
+
+  // Default file paths are:
+  //   ${RESOURCEDIR}/lib/linux/ve (== getArchSpecificLibPath)
+  //   /lib/../lib64
+  //   /usr/lib/../lib64
+  //   ${BINPATH}/../lib
+  //   /lib
+  //   /usr/lib
+  // These are fine for the host but not for VE, so define them all
+  // from scratch here.
+  getFilePaths().clear();
+  getFilePaths().push_back(getArchSpecificLibPath());
+  if (getTriple().isMusl())
+    getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/musl/lib");
+  else
+    getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/lib");
+}
+
+Tool *VEToolChain::buildAssembler() const {
+  return new tools::gnutools::Assembler(*this);
+}
+
+Tool *VEToolChain::buildLinker() const {
+  return new tools::gnutools::Linker(*this);
+}
+
+bool VEToolChain::isPICDefault() const { return false; }
+
+bool VEToolChain::isPIEDefault() const { return false; }
+
+bool VEToolChain::isPICDefaultForced() const { return false; }
+
+bool VEToolChain::SupportsProfiling() const { return false; }
+
+bool VEToolChain::hasBlocksRuntime() const { return false; }
+
+void VEToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+                                            ArgStringList &CC1Args) const {
+  if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+    return;
+
+  if (DriverArgs.hasArg(options::OPT_nobuiltininc) &&
+      DriverArgs.hasArg(options::OPT_nostdlibinc))
+    return;
+
+  if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+    SmallString<128> P(getDriver().ResourceDir);
+    llvm::sys::path::append(P, "include");
+    addSystemInclude(DriverArgs, CC1Args, P);
+  }
+
+  if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
+    if (const char *cl_include_dir = getenv("NCC_C_INCLUDE_PATH")) {
+      SmallVector<StringRef, 4> Dirs;
+      const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+      StringRef(cl_include_dir).split(Dirs, StringRef(EnvPathSeparatorStr));
+      ArrayRef<StringRef> DirVec(Dirs);
+      addSystemIncludes(DriverArgs, CC1Args, DirVec);
+    } else {
+      if (getTriple().isMusl())
+        addSystemInclude(DriverArgs, CC1Args,
+                         getDriver().SysRoot + "/opt/nec/ve/musl/include");
+      else
+        addSystemInclude(DriverArgs, CC1Args,
+                         getDriver().SysRoot + "/opt/nec/ve/include");
+    }
+  }
+}
+
+void VEToolChain::addClangTargetOptions(const ArgList &DriverArgs,
+                                        ArgStringList &CC1Args,
+                                        Action::OffloadKind) const {
+  CC1Args.push_back("-nostdsysteminc");
+  bool UseInitArrayDefault = true;
+  if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
+                         options::OPT_fno_use_init_array, UseInitArrayDefault))
+    CC1Args.push_back("-fuse-init-array");
+}
+
+void VEToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+                                               ArgStringList &CC1Args) const {
+  if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) ||
+      DriverArgs.hasArg(options::OPT_nostdlibinc) ||
DriverArgs.hasArg(options::OPT_nostdincxx)) + return; + if (const char *cl_include_dir = getenv("NCC_CPLUS_INCLUDE_PATH")) { + SmallVector<StringRef, 4> Dirs; + const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'}; + StringRef(cl_include_dir).split(Dirs, StringRef(EnvPathSeparatorStr)); + ArrayRef<StringRef> DirVec(Dirs); + addSystemIncludes(DriverArgs, CC1Args, DirVec); + } else { + SmallString<128> P(getDriver().ResourceDir); + llvm::sys::path::append(P, "include/c++/v1"); + addSystemInclude(DriverArgs, CC1Args, P); + } +} + +void VEToolChain::AddCXXStdlibLibArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { + assert((GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) && + "Only -lc++ (aka libcxx) is supported in this toolchain."); + + tools::addArchSpecificRPath(*this, Args, CmdArgs); + + CmdArgs.push_back("-lc++"); + CmdArgs.push_back("-lc++abi"); + CmdArgs.push_back("-lunwind"); + // libc++ requires -lpthread under the glibc environment + // libunwind requires -ldl under the glibc environment + if (!getTriple().isMusl()) { + CmdArgs.push_back("-lpthread"); + CmdArgs.push_back("-ldl"); + } +} + +llvm::ExceptionHandling +VEToolChain::GetExceptionModel(const ArgList &Args) const { + // VE uses SjLj exceptions. + return llvm::ExceptionHandling::SjLj; +} diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2980,7 +2980,8 @@ Arch != llvm::Triple::x86; emitError |= (DefaultCC == LangOptions::DCC_VectorCall || DefaultCC == LangOptions::DCC_RegCall) && - !(Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64); + !(Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64 || + Arch == llvm::Triple::ve); if (emitError) Diags.Report(diag::err_drv_argument_not_allowed_with) << A->getSpelling() << T.getTriple(); diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -120,6 +120,9 @@ xsaveoptintrin.h xsavesintrin.h xtestintrin.h + velintrin.h + velintrin_gen.h + velintrin_approx.h ) set(cuda_wrapper_files diff --git a/clang/lib/Headers/velintrin.h b/clang/lib/Headers/velintrin.h new file mode 100644 --- /dev/null +++ b/clang/lib/Headers/velintrin.h @@ -0,0 +1,48 @@ +#ifndef __VEL_INTRIN_H__ +#define __VEL_INTRIN_H__ + +typedef double __vr __attribute__((__vector_size__(2048))); +typedef double __vm __attribute__((__vector_size__(32))); +typedef double __vm256 __attribute__((__vector_size__(32))); +typedef double __vm512 __attribute__((__vector_size__(64))); + +enum VShuffleCodes { + VE_VSHUFFLE_YUYU = 0, + VE_VSHUFFLE_YUYL = 1, + VE_VSHUFFLE_YUZU = 2, + VE_VSHUFFLE_YUZL = 3, + VE_VSHUFFLE_YLYU = 4, + VE_VSHUFFLE_YLYL = 5, + VE_VSHUFFLE_YLZU = 6, + VE_VSHUFFLE_YLZL = 7, + VE_VSHUFFLE_ZUYU = 8, + VE_VSHUFFLE_ZUYL = 9, + VE_VSHUFFLE_ZUZU = 10, + VE_VSHUFFLE_ZUZL = 11, + VE_VSHUFFLE_ZLYU = 12, + VE_VSHUFFLE_ZLYL = 13, + VE_VSHUFFLE_ZLZU = 14, + VE_VSHUFFLE_ZLZL = 15, +} ; + +#include <velintrin_gen.h> +#include <velintrin_approx.h> + +#define _vel_svob() __builtin_ve_vl_svob() + +// pack + +#define _vel_pack_f32p __builtin_ve_vl_pack_f32p +#define _vel_pack_f32a __builtin_ve_vl_pack_f32a + +static inline unsigned long int _vel_pack_i32(int a, int b) +{ + return (((unsigned long int)a) << 32) | (unsigned int)b; +} + +#define _vel_extract_vm512u(vm) __builtin_ve_vl_extract_vm512u(vm) +#define _vel_extract_vm512l(vm) __builtin_ve_vl_extract_vm512l(vm) +#define _vel_insert_vm512u(vm512, vm)
__builtin_ve_vl_insert_vm512u(vm512, vm) +#define _vel_insert_vm512l(vm512, vm) __builtin_ve_vl_insert_vm512l(vm512, vm) + +#endif diff --git a/clang/lib/Headers/velintrin_approx.h b/clang/lib/Headers/velintrin_approx.h new file mode 100644 --- /dev/null +++ b/clang/lib/Headers/velintrin_approx.h @@ -0,0 +1,115 @@ +#ifndef __VEL_INTRIN_APPROX_H__ +#define __VEL_INTRIN_APPROX_H__ + +static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) { + float s0; + __vr v2, v3, v4, v5; + v5 = _vel_vrcps_vvl(v1, l); + s0 = 1.0; + v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l); + v3 = _vel_vfmads_vvvvl(v5, v5, v4, l); + v2 = _vel_vfmuls_vvvl(v0, v3, l); + v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l); + v2 = _vel_vfmads_vvvvl(v2, v5, v4, l); + v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l); + v0 = _vel_vfmads_vvvvl(v2, v3, v0, l); + return v0; +} + +static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) { + float s0; + __vr v2, v3, v4, v5; + v5 = _vel_pvrcp_vvl(v1, l); + s0 = 1.0; + v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l); + v3 = _vel_pvfmad_vvvvl(v5, v5, v4, l); + v2 = _vel_pvfmul_vvvl(v0, v3, l); + v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l); + v2 = _vel_pvfmad_vvvvl(v2, v5, v4, l); + v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l); + v0 = _vel_pvfmad_vvvvl(v2, v3, v0, l); + return v0; +} + +static inline __vr _vel_approx_vfdivs_vsvl(float s0, __vr v0, int l) { + float s1; + __vr v1, v2, v3, v4; + v4 = _vel_vrcps_vvl(v0, l); + s1 = 1.0; + v2 = _vel_vfnmsbs_vsvvl(s1, v0, v4, l); + v2 = _vel_vfmads_vvvvl(v4, v4, v2, l); + v1 = _vel_vfmuls_vsvl(s0, v2, l); + v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l); + v1 = _vel_vfmads_vvvvl(v1, v4, v3, l); + v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l); + v0 = _vel_vfmads_vvvvl(v1, v2, v3, l); + return v0; +} + +static inline __vr _vel_approx_vfdivs_vvsl(__vr v0, float s0, int l) { + float s1; + __vr v1, v2; + s1 = 1.0f / s0; + v1 = _vel_vfmuls_vsvl(s1, v0, l); + v2 = _vel_vfnmsbs_vvsvl(v0, s0, v1, l); + v0 = _vel_vfmads_vvsvl(v1, s1, v2, l); + return v0; +} + +static inline __vr _vel_approx_vfdivd_vsvl(double s0, __vr v0, int l) +{ + __vr v1, v2, v3; + v2 = _vel_vrcpd_vvl(v0, l); + double s1 = 1.0; + v3 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l); + v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l); + v1 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l); + v1 = _vel_vfmadd_vvvvl(v2, v2, v1, l); + v1 = _vel_vaddul_vsvl(1, v1, l); + v3 = _vel_vfnmsbd_vsvvl(s1, v0, v1, l); + v3 = _vel_vfmadd_vvvvl(v1, v1, v3, l); + v1 = _vel_vfmuld_vsvl(s0, v3, l); + v0 = _vel_vfnmsbd_vsvvl(s0, v1, v0, l); + v0 = _vel_vfmadd_vvvvl(v1, v3, v0, l); + return v0; +} + +static inline __vr _vel_approx_vfsqrtd_vvl(__vr v0, int l) +{ + double s0, s1; + __vr v1, v2, v3; + v2 = _vel_vrsqrtdnex_vvl(v0, l); + v1 = _vel_vfmuld_vvvl(v0, v2, l); + s0 = 1.0; + s1 = 0.5; + v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l); + v3 = _vel_vfmuld_vsvl(s1, v3, l); + v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l); + v1 = _vel_vfmuld_vvvl(v0, v2, l); + v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l); + v3 = _vel_vfmuld_vsvl(s1, v3, l); + v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l); + return v0; +} + +static inline __vr _vel_approx_vfsqrts_vvl(__vr v0, int l) +{ + float s0, s1; + __vr v1, v2, v3; + v0 = _vel_vcvtds_vvl(v0, l); + v2 = _vel_vrsqrtdnex_vvl(v0, l); + v1 = _vel_vfmuld_vvvl(v0, v2, l); + s0 = 1.0; + s1 = 0.5; + v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l); + v3 = _vel_vfmuld_vsvl(s1, v3, l); + v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l); + v1 = _vel_vfmuld_vvvl(v0, v2, l); + v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l); + v3 = _vel_vfmuld_vsvl(s1, v3, l); + v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l); + v0 = 
_vel_vcvtsd_vvl(v0, l); + return v0; +} + +#endif diff --git a/clang/lib/Headers/velintrin_gen.h b/clang/lib/Headers/velintrin_gen.h new file mode 100644 --- /dev/null +++ b/clang/lib/Headers/velintrin_gen.h @@ -0,0 +1,1301 @@ +#define _vel_vld_vssl __builtin_ve_vl_vld_vssl +#define _vel_vld_vssvl __builtin_ve_vl_vld_vssvl +#define _vel_vldnc_vssl __builtin_ve_vl_vldnc_vssl +#define _vel_vldnc_vssvl __builtin_ve_vl_vldnc_vssvl +#define _vel_vldu_vssl __builtin_ve_vl_vldu_vssl +#define _vel_vldu_vssvl __builtin_ve_vl_vldu_vssvl +#define _vel_vldunc_vssl __builtin_ve_vl_vldunc_vssl +#define _vel_vldunc_vssvl __builtin_ve_vl_vldunc_vssvl +#define _vel_vldlsx_vssl __builtin_ve_vl_vldlsx_vssl +#define _vel_vldlsx_vssvl __builtin_ve_vl_vldlsx_vssvl +#define _vel_vldlsxnc_vssl __builtin_ve_vl_vldlsxnc_vssl +#define _vel_vldlsxnc_vssvl __builtin_ve_vl_vldlsxnc_vssvl +#define _vel_vldlzx_vssl __builtin_ve_vl_vldlzx_vssl +#define _vel_vldlzx_vssvl __builtin_ve_vl_vldlzx_vssvl +#define _vel_vldlzxnc_vssl __builtin_ve_vl_vldlzxnc_vssl +#define _vel_vldlzxnc_vssvl __builtin_ve_vl_vldlzxnc_vssvl +#define _vel_vld2d_vssl __builtin_ve_vl_vld2d_vssl +#define _vel_vld2d_vssvl __builtin_ve_vl_vld2d_vssvl +#define _vel_vld2dnc_vssl __builtin_ve_vl_vld2dnc_vssl +#define _vel_vld2dnc_vssvl __builtin_ve_vl_vld2dnc_vssvl +#define _vel_vldu2d_vssl __builtin_ve_vl_vldu2d_vssl +#define _vel_vldu2d_vssvl __builtin_ve_vl_vldu2d_vssvl +#define _vel_vldu2dnc_vssl __builtin_ve_vl_vldu2dnc_vssl +#define _vel_vldu2dnc_vssvl __builtin_ve_vl_vldu2dnc_vssvl +#define _vel_vldl2dsx_vssl __builtin_ve_vl_vldl2dsx_vssl +#define _vel_vldl2dsx_vssvl __builtin_ve_vl_vldl2dsx_vssvl +#define _vel_vldl2dsxnc_vssl __builtin_ve_vl_vldl2dsxnc_vssl +#define _vel_vldl2dsxnc_vssvl __builtin_ve_vl_vldl2dsxnc_vssvl +#define _vel_vldl2dzx_vssl __builtin_ve_vl_vldl2dzx_vssl +#define _vel_vldl2dzx_vssvl __builtin_ve_vl_vldl2dzx_vssvl +#define _vel_vldl2dzxnc_vssl __builtin_ve_vl_vldl2dzxnc_vssl +#define _vel_vldl2dzxnc_vssvl __builtin_ve_vl_vldl2dzxnc_vssvl +#define _vel_vst_vssl __builtin_ve_vl_vst_vssl +#define _vel_vst_vssml __builtin_ve_vl_vst_vssml +#define _vel_vstnc_vssl __builtin_ve_vl_vstnc_vssl +#define _vel_vstnc_vssml __builtin_ve_vl_vstnc_vssml +#define _vel_vstot_vssl __builtin_ve_vl_vstot_vssl +#define _vel_vstot_vssml __builtin_ve_vl_vstot_vssml +#define _vel_vstncot_vssl __builtin_ve_vl_vstncot_vssl +#define _vel_vstncot_vssml __builtin_ve_vl_vstncot_vssml +#define _vel_vstu_vssl __builtin_ve_vl_vstu_vssl +#define _vel_vstu_vssml __builtin_ve_vl_vstu_vssml +#define _vel_vstunc_vssl __builtin_ve_vl_vstunc_vssl +#define _vel_vstunc_vssml __builtin_ve_vl_vstunc_vssml +#define _vel_vstuot_vssl __builtin_ve_vl_vstuot_vssl +#define _vel_vstuot_vssml __builtin_ve_vl_vstuot_vssml +#define _vel_vstuncot_vssl __builtin_ve_vl_vstuncot_vssl +#define _vel_vstuncot_vssml __builtin_ve_vl_vstuncot_vssml +#define _vel_vstl_vssl __builtin_ve_vl_vstl_vssl +#define _vel_vstl_vssml __builtin_ve_vl_vstl_vssml +#define _vel_vstlnc_vssl __builtin_ve_vl_vstlnc_vssl +#define _vel_vstlnc_vssml __builtin_ve_vl_vstlnc_vssml +#define _vel_vstlot_vssl __builtin_ve_vl_vstlot_vssl +#define _vel_vstlot_vssml __builtin_ve_vl_vstlot_vssml +#define _vel_vstlncot_vssl __builtin_ve_vl_vstlncot_vssl +#define _vel_vstlncot_vssml __builtin_ve_vl_vstlncot_vssml +#define _vel_vst2d_vssl __builtin_ve_vl_vst2d_vssl +#define _vel_vst2d_vssml __builtin_ve_vl_vst2d_vssml +#define _vel_vst2dnc_vssl __builtin_ve_vl_vst2dnc_vssl +#define _vel_vst2dnc_vssml 
__builtin_ve_vl_vst2dnc_vssml +#define _vel_vst2dot_vssl __builtin_ve_vl_vst2dot_vssl +#define _vel_vst2dot_vssml __builtin_ve_vl_vst2dot_vssml +#define _vel_vst2dncot_vssl __builtin_ve_vl_vst2dncot_vssl +#define _vel_vst2dncot_vssml __builtin_ve_vl_vst2dncot_vssml +#define _vel_vstu2d_vssl __builtin_ve_vl_vstu2d_vssl +#define _vel_vstu2d_vssml __builtin_ve_vl_vstu2d_vssml +#define _vel_vstu2dnc_vssl __builtin_ve_vl_vstu2dnc_vssl +#define _vel_vstu2dnc_vssml __builtin_ve_vl_vstu2dnc_vssml +#define _vel_vstu2dot_vssl __builtin_ve_vl_vstu2dot_vssl +#define _vel_vstu2dot_vssml __builtin_ve_vl_vstu2dot_vssml +#define _vel_vstu2dncot_vssl __builtin_ve_vl_vstu2dncot_vssl +#define _vel_vstu2dncot_vssml __builtin_ve_vl_vstu2dncot_vssml +#define _vel_vstl2d_vssl __builtin_ve_vl_vstl2d_vssl +#define _vel_vstl2d_vssml __builtin_ve_vl_vstl2d_vssml +#define _vel_vstl2dnc_vssl __builtin_ve_vl_vstl2dnc_vssl +#define _vel_vstl2dnc_vssml __builtin_ve_vl_vstl2dnc_vssml +#define _vel_vstl2dot_vssl __builtin_ve_vl_vstl2dot_vssl +#define _vel_vstl2dot_vssml __builtin_ve_vl_vstl2dot_vssml +#define _vel_vstl2dncot_vssl __builtin_ve_vl_vstl2dncot_vssl +#define _vel_vstl2dncot_vssml __builtin_ve_vl_vstl2dncot_vssml +#define _vel_pfchv_ssl __builtin_ve_vl_pfchv_ssl +#define _vel_pfchvnc_ssl __builtin_ve_vl_pfchvnc_ssl +#define _vel_lsv_vvss __builtin_ve_vl_lsv_vvss +#define _vel_lvsl_svs __builtin_ve_vl_lvsl_svs +#define _vel_lvsd_svs __builtin_ve_vl_lvsd_svs +#define _vel_lvss_svs __builtin_ve_vl_lvss_svs +#define _vel_lvm_mmss __builtin_ve_vl_lvm_mmss +#define _vel_lvm_MMss __builtin_ve_vl_lvm_MMss +#define _vel_svm_sms __builtin_ve_vl_svm_sms +#define _vel_svm_sMs __builtin_ve_vl_svm_sMs +#define _vel_vbrdd_vsl __builtin_ve_vl_vbrdd_vsl +#define _vel_vbrdd_vsvl __builtin_ve_vl_vbrdd_vsvl +#define _vel_vbrdd_vsmvl __builtin_ve_vl_vbrdd_vsmvl +#define _vel_vbrdl_vsl __builtin_ve_vl_vbrdl_vsl +#define _vel_vbrdl_vsvl __builtin_ve_vl_vbrdl_vsvl +#define _vel_vbrdl_vsmvl __builtin_ve_vl_vbrdl_vsmvl +#define _vel_vbrds_vsl __builtin_ve_vl_vbrds_vsl +#define _vel_vbrds_vsvl __builtin_ve_vl_vbrds_vsvl +#define _vel_vbrds_vsmvl __builtin_ve_vl_vbrds_vsmvl +#define _vel_vbrdw_vsl __builtin_ve_vl_vbrdw_vsl +#define _vel_vbrdw_vsvl __builtin_ve_vl_vbrdw_vsvl +#define _vel_vbrdw_vsmvl __builtin_ve_vl_vbrdw_vsmvl +#define _vel_pvbrd_vsl __builtin_ve_vl_pvbrd_vsl +#define _vel_pvbrd_vsvl __builtin_ve_vl_pvbrd_vsvl +#define _vel_pvbrd_vsMvl __builtin_ve_vl_pvbrd_vsMvl +#define _vel_vmv_vsvl __builtin_ve_vl_vmv_vsvl +#define _vel_vmv_vsvvl __builtin_ve_vl_vmv_vsvvl +#define _vel_vmv_vsvmvl __builtin_ve_vl_vmv_vsvmvl +#define _vel_vaddul_vvvl __builtin_ve_vl_vaddul_vvvl +#define _vel_vaddul_vvvvl __builtin_ve_vl_vaddul_vvvvl +#define _vel_vaddul_vsvl __builtin_ve_vl_vaddul_vsvl +#define _vel_vaddul_vsvvl __builtin_ve_vl_vaddul_vsvvl +#define _vel_vaddul_vvvmvl __builtin_ve_vl_vaddul_vvvmvl +#define _vel_vaddul_vsvmvl __builtin_ve_vl_vaddul_vsvmvl +#define _vel_vadduw_vvvl __builtin_ve_vl_vadduw_vvvl +#define _vel_vadduw_vvvvl __builtin_ve_vl_vadduw_vvvvl +#define _vel_vadduw_vsvl __builtin_ve_vl_vadduw_vsvl +#define _vel_vadduw_vsvvl __builtin_ve_vl_vadduw_vsvvl +#define _vel_vadduw_vvvmvl __builtin_ve_vl_vadduw_vvvmvl +#define _vel_vadduw_vsvmvl __builtin_ve_vl_vadduw_vsvmvl +#define _vel_pvaddu_vvvl __builtin_ve_vl_pvaddu_vvvl +#define _vel_pvaddu_vvvvl __builtin_ve_vl_pvaddu_vvvvl +#define _vel_pvaddu_vsvl __builtin_ve_vl_pvaddu_vsvl +#define _vel_pvaddu_vsvvl __builtin_ve_vl_pvaddu_vsvvl +#define _vel_pvaddu_vvvMvl 
__builtin_ve_vl_pvaddu_vvvMvl +#define _vel_pvaddu_vsvMvl __builtin_ve_vl_pvaddu_vsvMvl +#define _vel_vaddswsx_vvvl __builtin_ve_vl_vaddswsx_vvvl +#define _vel_vaddswsx_vvvvl __builtin_ve_vl_vaddswsx_vvvvl +#define _vel_vaddswsx_vsvl __builtin_ve_vl_vaddswsx_vsvl +#define _vel_vaddswsx_vsvvl __builtin_ve_vl_vaddswsx_vsvvl +#define _vel_vaddswsx_vvvmvl __builtin_ve_vl_vaddswsx_vvvmvl +#define _vel_vaddswsx_vsvmvl __builtin_ve_vl_vaddswsx_vsvmvl +#define _vel_vaddswzx_vvvl __builtin_ve_vl_vaddswzx_vvvl +#define _vel_vaddswzx_vvvvl __builtin_ve_vl_vaddswzx_vvvvl +#define _vel_vaddswzx_vsvl __builtin_ve_vl_vaddswzx_vsvl +#define _vel_vaddswzx_vsvvl __builtin_ve_vl_vaddswzx_vsvvl +#define _vel_vaddswzx_vvvmvl __builtin_ve_vl_vaddswzx_vvvmvl +#define _vel_vaddswzx_vsvmvl __builtin_ve_vl_vaddswzx_vsvmvl +#define _vel_pvadds_vvvl __builtin_ve_vl_pvadds_vvvl +#define _vel_pvadds_vvvvl __builtin_ve_vl_pvadds_vvvvl +#define _vel_pvadds_vsvl __builtin_ve_vl_pvadds_vsvl +#define _vel_pvadds_vsvvl __builtin_ve_vl_pvadds_vsvvl +#define _vel_pvadds_vvvMvl __builtin_ve_vl_pvadds_vvvMvl +#define _vel_pvadds_vsvMvl __builtin_ve_vl_pvadds_vsvMvl +#define _vel_vaddsl_vvvl __builtin_ve_vl_vaddsl_vvvl +#define _vel_vaddsl_vvvvl __builtin_ve_vl_vaddsl_vvvvl +#define _vel_vaddsl_vsvl __builtin_ve_vl_vaddsl_vsvl +#define _vel_vaddsl_vsvvl __builtin_ve_vl_vaddsl_vsvvl +#define _vel_vaddsl_vvvmvl __builtin_ve_vl_vaddsl_vvvmvl +#define _vel_vaddsl_vsvmvl __builtin_ve_vl_vaddsl_vsvmvl +#define _vel_vsubul_vvvl __builtin_ve_vl_vsubul_vvvl +#define _vel_vsubul_vvvvl __builtin_ve_vl_vsubul_vvvvl +#define _vel_vsubul_vsvl __builtin_ve_vl_vsubul_vsvl +#define _vel_vsubul_vsvvl __builtin_ve_vl_vsubul_vsvvl +#define _vel_vsubul_vvvmvl __builtin_ve_vl_vsubul_vvvmvl +#define _vel_vsubul_vsvmvl __builtin_ve_vl_vsubul_vsvmvl +#define _vel_vsubuw_vvvl __builtin_ve_vl_vsubuw_vvvl +#define _vel_vsubuw_vvvvl __builtin_ve_vl_vsubuw_vvvvl +#define _vel_vsubuw_vsvl __builtin_ve_vl_vsubuw_vsvl +#define _vel_vsubuw_vsvvl __builtin_ve_vl_vsubuw_vsvvl +#define _vel_vsubuw_vvvmvl __builtin_ve_vl_vsubuw_vvvmvl +#define _vel_vsubuw_vsvmvl __builtin_ve_vl_vsubuw_vsvmvl +#define _vel_pvsubu_vvvl __builtin_ve_vl_pvsubu_vvvl +#define _vel_pvsubu_vvvvl __builtin_ve_vl_pvsubu_vvvvl +#define _vel_pvsubu_vsvl __builtin_ve_vl_pvsubu_vsvl +#define _vel_pvsubu_vsvvl __builtin_ve_vl_pvsubu_vsvvl +#define _vel_pvsubu_vvvMvl __builtin_ve_vl_pvsubu_vvvMvl +#define _vel_pvsubu_vsvMvl __builtin_ve_vl_pvsubu_vsvMvl +#define _vel_vsubswsx_vvvl __builtin_ve_vl_vsubswsx_vvvl +#define _vel_vsubswsx_vvvvl __builtin_ve_vl_vsubswsx_vvvvl +#define _vel_vsubswsx_vsvl __builtin_ve_vl_vsubswsx_vsvl +#define _vel_vsubswsx_vsvvl __builtin_ve_vl_vsubswsx_vsvvl +#define _vel_vsubswsx_vvvmvl __builtin_ve_vl_vsubswsx_vvvmvl +#define _vel_vsubswsx_vsvmvl __builtin_ve_vl_vsubswsx_vsvmvl +#define _vel_vsubswzx_vvvl __builtin_ve_vl_vsubswzx_vvvl +#define _vel_vsubswzx_vvvvl __builtin_ve_vl_vsubswzx_vvvvl +#define _vel_vsubswzx_vsvl __builtin_ve_vl_vsubswzx_vsvl +#define _vel_vsubswzx_vsvvl __builtin_ve_vl_vsubswzx_vsvvl +#define _vel_vsubswzx_vvvmvl __builtin_ve_vl_vsubswzx_vvvmvl +#define _vel_vsubswzx_vsvmvl __builtin_ve_vl_vsubswzx_vsvmvl +#define _vel_pvsubs_vvvl __builtin_ve_vl_pvsubs_vvvl +#define _vel_pvsubs_vvvvl __builtin_ve_vl_pvsubs_vvvvl +#define _vel_pvsubs_vsvl __builtin_ve_vl_pvsubs_vsvl +#define _vel_pvsubs_vsvvl __builtin_ve_vl_pvsubs_vsvvl +#define _vel_pvsubs_vvvMvl __builtin_ve_vl_pvsubs_vvvMvl +#define _vel_pvsubs_vsvMvl __builtin_ve_vl_pvsubs_vsvMvl +#define 
_vel_vsubsl_vvvl __builtin_ve_vl_vsubsl_vvvl +#define _vel_vsubsl_vvvvl __builtin_ve_vl_vsubsl_vvvvl +#define _vel_vsubsl_vsvl __builtin_ve_vl_vsubsl_vsvl +#define _vel_vsubsl_vsvvl __builtin_ve_vl_vsubsl_vsvvl +#define _vel_vsubsl_vvvmvl __builtin_ve_vl_vsubsl_vvvmvl +#define _vel_vsubsl_vsvmvl __builtin_ve_vl_vsubsl_vsvmvl +#define _vel_vmulul_vvvl __builtin_ve_vl_vmulul_vvvl +#define _vel_vmulul_vvvvl __builtin_ve_vl_vmulul_vvvvl +#define _vel_vmulul_vsvl __builtin_ve_vl_vmulul_vsvl +#define _vel_vmulul_vsvvl __builtin_ve_vl_vmulul_vsvvl +#define _vel_vmulul_vvvmvl __builtin_ve_vl_vmulul_vvvmvl +#define _vel_vmulul_vsvmvl __builtin_ve_vl_vmulul_vsvmvl +#define _vel_vmuluw_vvvl __builtin_ve_vl_vmuluw_vvvl +#define _vel_vmuluw_vvvvl __builtin_ve_vl_vmuluw_vvvvl +#define _vel_vmuluw_vsvl __builtin_ve_vl_vmuluw_vsvl +#define _vel_vmuluw_vsvvl __builtin_ve_vl_vmuluw_vsvvl +#define _vel_vmuluw_vvvmvl __builtin_ve_vl_vmuluw_vvvmvl +#define _vel_vmuluw_vsvmvl __builtin_ve_vl_vmuluw_vsvmvl +#define _vel_vmulswsx_vvvl __builtin_ve_vl_vmulswsx_vvvl +#define _vel_vmulswsx_vvvvl __builtin_ve_vl_vmulswsx_vvvvl +#define _vel_vmulswsx_vsvl __builtin_ve_vl_vmulswsx_vsvl +#define _vel_vmulswsx_vsvvl __builtin_ve_vl_vmulswsx_vsvvl +#define _vel_vmulswsx_vvvmvl __builtin_ve_vl_vmulswsx_vvvmvl +#define _vel_vmulswsx_vsvmvl __builtin_ve_vl_vmulswsx_vsvmvl +#define _vel_vmulswzx_vvvl __builtin_ve_vl_vmulswzx_vvvl +#define _vel_vmulswzx_vvvvl __builtin_ve_vl_vmulswzx_vvvvl +#define _vel_vmulswzx_vsvl __builtin_ve_vl_vmulswzx_vsvl +#define _vel_vmulswzx_vsvvl __builtin_ve_vl_vmulswzx_vsvvl +#define _vel_vmulswzx_vvvmvl __builtin_ve_vl_vmulswzx_vvvmvl +#define _vel_vmulswzx_vsvmvl __builtin_ve_vl_vmulswzx_vsvmvl +#define _vel_vmulsl_vvvl __builtin_ve_vl_vmulsl_vvvl +#define _vel_vmulsl_vvvvl __builtin_ve_vl_vmulsl_vvvvl +#define _vel_vmulsl_vsvl __builtin_ve_vl_vmulsl_vsvl +#define _vel_vmulsl_vsvvl __builtin_ve_vl_vmulsl_vsvvl +#define _vel_vmulsl_vvvmvl __builtin_ve_vl_vmulsl_vvvmvl +#define _vel_vmulsl_vsvmvl __builtin_ve_vl_vmulsl_vsvmvl +#define _vel_vmulslw_vvvl __builtin_ve_vl_vmulslw_vvvl +#define _vel_vmulslw_vvvvl __builtin_ve_vl_vmulslw_vvvvl +#define _vel_vmulslw_vsvl __builtin_ve_vl_vmulslw_vsvl +#define _vel_vmulslw_vsvvl __builtin_ve_vl_vmulslw_vsvvl +#define _vel_vdivul_vvvl __builtin_ve_vl_vdivul_vvvl +#define _vel_vdivul_vvvvl __builtin_ve_vl_vdivul_vvvvl +#define _vel_vdivul_vsvl __builtin_ve_vl_vdivul_vsvl +#define _vel_vdivul_vsvvl __builtin_ve_vl_vdivul_vsvvl +#define _vel_vdivul_vvvmvl __builtin_ve_vl_vdivul_vvvmvl +#define _vel_vdivul_vsvmvl __builtin_ve_vl_vdivul_vsvmvl +#define _vel_vdivuw_vvvl __builtin_ve_vl_vdivuw_vvvl +#define _vel_vdivuw_vvvvl __builtin_ve_vl_vdivuw_vvvvl +#define _vel_vdivuw_vsvl __builtin_ve_vl_vdivuw_vsvl +#define _vel_vdivuw_vsvvl __builtin_ve_vl_vdivuw_vsvvl +#define _vel_vdivuw_vvvmvl __builtin_ve_vl_vdivuw_vvvmvl +#define _vel_vdivuw_vsvmvl __builtin_ve_vl_vdivuw_vsvmvl +#define _vel_vdivul_vvsl __builtin_ve_vl_vdivul_vvsl +#define _vel_vdivul_vvsvl __builtin_ve_vl_vdivul_vvsvl +#define _vel_vdivul_vvsmvl __builtin_ve_vl_vdivul_vvsmvl +#define _vel_vdivuw_vvsl __builtin_ve_vl_vdivuw_vvsl +#define _vel_vdivuw_vvsvl __builtin_ve_vl_vdivuw_vvsvl +#define _vel_vdivuw_vvsmvl __builtin_ve_vl_vdivuw_vvsmvl +#define _vel_vdivswsx_vvvl __builtin_ve_vl_vdivswsx_vvvl +#define _vel_vdivswsx_vvvvl __builtin_ve_vl_vdivswsx_vvvvl +#define _vel_vdivswsx_vsvl __builtin_ve_vl_vdivswsx_vsvl +#define _vel_vdivswsx_vsvvl __builtin_ve_vl_vdivswsx_vsvvl +#define 
_vel_vdivswsx_vvvmvl __builtin_ve_vl_vdivswsx_vvvmvl +#define _vel_vdivswsx_vsvmvl __builtin_ve_vl_vdivswsx_vsvmvl +#define _vel_vdivswzx_vvvl __builtin_ve_vl_vdivswzx_vvvl +#define _vel_vdivswzx_vvvvl __builtin_ve_vl_vdivswzx_vvvvl +#define _vel_vdivswzx_vsvl __builtin_ve_vl_vdivswzx_vsvl +#define _vel_vdivswzx_vsvvl __builtin_ve_vl_vdivswzx_vsvvl +#define _vel_vdivswzx_vvvmvl __builtin_ve_vl_vdivswzx_vvvmvl +#define _vel_vdivswzx_vsvmvl __builtin_ve_vl_vdivswzx_vsvmvl +#define _vel_vdivswsx_vvsl __builtin_ve_vl_vdivswsx_vvsl +#define _vel_vdivswsx_vvsvl __builtin_ve_vl_vdivswsx_vvsvl +#define _vel_vdivswsx_vvsmvl __builtin_ve_vl_vdivswsx_vvsmvl +#define _vel_vdivswzx_vvsl __builtin_ve_vl_vdivswzx_vvsl +#define _vel_vdivswzx_vvsvl __builtin_ve_vl_vdivswzx_vvsvl +#define _vel_vdivswzx_vvsmvl __builtin_ve_vl_vdivswzx_vvsmvl +#define _vel_vdivsl_vvvl __builtin_ve_vl_vdivsl_vvvl +#define _vel_vdivsl_vvvvl __builtin_ve_vl_vdivsl_vvvvl +#define _vel_vdivsl_vsvl __builtin_ve_vl_vdivsl_vsvl +#define _vel_vdivsl_vsvvl __builtin_ve_vl_vdivsl_vsvvl +#define _vel_vdivsl_vvvmvl __builtin_ve_vl_vdivsl_vvvmvl +#define _vel_vdivsl_vsvmvl __builtin_ve_vl_vdivsl_vsvmvl +#define _vel_vdivsl_vvsl __builtin_ve_vl_vdivsl_vvsl +#define _vel_vdivsl_vvsvl __builtin_ve_vl_vdivsl_vvsvl +#define _vel_vdivsl_vvsmvl __builtin_ve_vl_vdivsl_vvsmvl +#define _vel_vcmpul_vvvl __builtin_ve_vl_vcmpul_vvvl +#define _vel_vcmpul_vvvvl __builtin_ve_vl_vcmpul_vvvvl +#define _vel_vcmpul_vsvl __builtin_ve_vl_vcmpul_vsvl +#define _vel_vcmpul_vsvvl __builtin_ve_vl_vcmpul_vsvvl +#define _vel_vcmpul_vvvmvl __builtin_ve_vl_vcmpul_vvvmvl +#define _vel_vcmpul_vsvmvl __builtin_ve_vl_vcmpul_vsvmvl +#define _vel_vcmpuw_vvvl __builtin_ve_vl_vcmpuw_vvvl +#define _vel_vcmpuw_vvvvl __builtin_ve_vl_vcmpuw_vvvvl +#define _vel_vcmpuw_vsvl __builtin_ve_vl_vcmpuw_vsvl +#define _vel_vcmpuw_vsvvl __builtin_ve_vl_vcmpuw_vsvvl +#define _vel_vcmpuw_vvvmvl __builtin_ve_vl_vcmpuw_vvvmvl +#define _vel_vcmpuw_vsvmvl __builtin_ve_vl_vcmpuw_vsvmvl +#define _vel_pvcmpu_vvvl __builtin_ve_vl_pvcmpu_vvvl +#define _vel_pvcmpu_vvvvl __builtin_ve_vl_pvcmpu_vvvvl +#define _vel_pvcmpu_vsvl __builtin_ve_vl_pvcmpu_vsvl +#define _vel_pvcmpu_vsvvl __builtin_ve_vl_pvcmpu_vsvvl +#define _vel_pvcmpu_vvvMvl __builtin_ve_vl_pvcmpu_vvvMvl +#define _vel_pvcmpu_vsvMvl __builtin_ve_vl_pvcmpu_vsvMvl +#define _vel_vcmpswsx_vvvl __builtin_ve_vl_vcmpswsx_vvvl +#define _vel_vcmpswsx_vvvvl __builtin_ve_vl_vcmpswsx_vvvvl +#define _vel_vcmpswsx_vsvl __builtin_ve_vl_vcmpswsx_vsvl +#define _vel_vcmpswsx_vsvvl __builtin_ve_vl_vcmpswsx_vsvvl +#define _vel_vcmpswsx_vvvmvl __builtin_ve_vl_vcmpswsx_vvvmvl +#define _vel_vcmpswsx_vsvmvl __builtin_ve_vl_vcmpswsx_vsvmvl +#define _vel_vcmpswzx_vvvl __builtin_ve_vl_vcmpswzx_vvvl +#define _vel_vcmpswzx_vvvvl __builtin_ve_vl_vcmpswzx_vvvvl +#define _vel_vcmpswzx_vsvl __builtin_ve_vl_vcmpswzx_vsvl +#define _vel_vcmpswzx_vsvvl __builtin_ve_vl_vcmpswzx_vsvvl +#define _vel_vcmpswzx_vvvmvl __builtin_ve_vl_vcmpswzx_vvvmvl +#define _vel_vcmpswzx_vsvmvl __builtin_ve_vl_vcmpswzx_vsvmvl +#define _vel_pvcmps_vvvl __builtin_ve_vl_pvcmps_vvvl +#define _vel_pvcmps_vvvvl __builtin_ve_vl_pvcmps_vvvvl +#define _vel_pvcmps_vsvl __builtin_ve_vl_pvcmps_vsvl +#define _vel_pvcmps_vsvvl __builtin_ve_vl_pvcmps_vsvvl +#define _vel_pvcmps_vvvMvl __builtin_ve_vl_pvcmps_vvvMvl +#define _vel_pvcmps_vsvMvl __builtin_ve_vl_pvcmps_vsvMvl +#define _vel_vcmpsl_vvvl __builtin_ve_vl_vcmpsl_vvvl +#define _vel_vcmpsl_vvvvl __builtin_ve_vl_vcmpsl_vvvvl +#define _vel_vcmpsl_vsvl 
__builtin_ve_vl_vcmpsl_vsvl +#define _vel_vcmpsl_vsvvl __builtin_ve_vl_vcmpsl_vsvvl +#define _vel_vcmpsl_vvvmvl __builtin_ve_vl_vcmpsl_vvvmvl +#define _vel_vcmpsl_vsvmvl __builtin_ve_vl_vcmpsl_vsvmvl +#define _vel_vmaxswsx_vvvl __builtin_ve_vl_vmaxswsx_vvvl +#define _vel_vmaxswsx_vvvvl __builtin_ve_vl_vmaxswsx_vvvvl +#define _vel_vmaxswsx_vsvl __builtin_ve_vl_vmaxswsx_vsvl +#define _vel_vmaxswsx_vsvvl __builtin_ve_vl_vmaxswsx_vsvvl +#define _vel_vmaxswsx_vvvmvl __builtin_ve_vl_vmaxswsx_vvvmvl +#define _vel_vmaxswsx_vsvmvl __builtin_ve_vl_vmaxswsx_vsvmvl +#define _vel_vmaxswzx_vvvl __builtin_ve_vl_vmaxswzx_vvvl +#define _vel_vmaxswzx_vvvvl __builtin_ve_vl_vmaxswzx_vvvvl +#define _vel_vmaxswzx_vsvl __builtin_ve_vl_vmaxswzx_vsvl +#define _vel_vmaxswzx_vsvvl __builtin_ve_vl_vmaxswzx_vsvvl +#define _vel_vmaxswzx_vvvmvl __builtin_ve_vl_vmaxswzx_vvvmvl +#define _vel_vmaxswzx_vsvmvl __builtin_ve_vl_vmaxswzx_vsvmvl +#define _vel_pvmaxs_vvvl __builtin_ve_vl_pvmaxs_vvvl +#define _vel_pvmaxs_vvvvl __builtin_ve_vl_pvmaxs_vvvvl +#define _vel_pvmaxs_vsvl __builtin_ve_vl_pvmaxs_vsvl +#define _vel_pvmaxs_vsvvl __builtin_ve_vl_pvmaxs_vsvvl +#define _vel_pvmaxs_vvvMvl __builtin_ve_vl_pvmaxs_vvvMvl +#define _vel_pvmaxs_vsvMvl __builtin_ve_vl_pvmaxs_vsvMvl +#define _vel_vminswsx_vvvl __builtin_ve_vl_vminswsx_vvvl +#define _vel_vminswsx_vvvvl __builtin_ve_vl_vminswsx_vvvvl +#define _vel_vminswsx_vsvl __builtin_ve_vl_vminswsx_vsvl +#define _vel_vminswsx_vsvvl __builtin_ve_vl_vminswsx_vsvvl +#define _vel_vminswsx_vvvmvl __builtin_ve_vl_vminswsx_vvvmvl +#define _vel_vminswsx_vsvmvl __builtin_ve_vl_vminswsx_vsvmvl +#define _vel_vminswzx_vvvl __builtin_ve_vl_vminswzx_vvvl +#define _vel_vminswzx_vvvvl __builtin_ve_vl_vminswzx_vvvvl +#define _vel_vminswzx_vsvl __builtin_ve_vl_vminswzx_vsvl +#define _vel_vminswzx_vsvvl __builtin_ve_vl_vminswzx_vsvvl +#define _vel_vminswzx_vvvmvl __builtin_ve_vl_vminswzx_vvvmvl +#define _vel_vminswzx_vsvmvl __builtin_ve_vl_vminswzx_vsvmvl +#define _vel_pvmins_vvvl __builtin_ve_vl_pvmins_vvvl +#define _vel_pvmins_vvvvl __builtin_ve_vl_pvmins_vvvvl +#define _vel_pvmins_vsvl __builtin_ve_vl_pvmins_vsvl +#define _vel_pvmins_vsvvl __builtin_ve_vl_pvmins_vsvvl +#define _vel_pvmins_vvvMvl __builtin_ve_vl_pvmins_vvvMvl +#define _vel_pvmins_vsvMvl __builtin_ve_vl_pvmins_vsvMvl +#define _vel_vmaxsl_vvvl __builtin_ve_vl_vmaxsl_vvvl +#define _vel_vmaxsl_vvvvl __builtin_ve_vl_vmaxsl_vvvvl +#define _vel_vmaxsl_vsvl __builtin_ve_vl_vmaxsl_vsvl +#define _vel_vmaxsl_vsvvl __builtin_ve_vl_vmaxsl_vsvvl +#define _vel_vmaxsl_vvvmvl __builtin_ve_vl_vmaxsl_vvvmvl +#define _vel_vmaxsl_vsvmvl __builtin_ve_vl_vmaxsl_vsvmvl +#define _vel_vminsl_vvvl __builtin_ve_vl_vminsl_vvvl +#define _vel_vminsl_vvvvl __builtin_ve_vl_vminsl_vvvvl +#define _vel_vminsl_vsvl __builtin_ve_vl_vminsl_vsvl +#define _vel_vminsl_vsvvl __builtin_ve_vl_vminsl_vsvvl +#define _vel_vminsl_vvvmvl __builtin_ve_vl_vminsl_vvvmvl +#define _vel_vminsl_vsvmvl __builtin_ve_vl_vminsl_vsvmvl +#define _vel_vand_vvvl __builtin_ve_vl_vand_vvvl +#define _vel_vand_vvvvl __builtin_ve_vl_vand_vvvvl +#define _vel_vand_vsvl __builtin_ve_vl_vand_vsvl +#define _vel_vand_vsvvl __builtin_ve_vl_vand_vsvvl +#define _vel_vand_vvvmvl __builtin_ve_vl_vand_vvvmvl +#define _vel_vand_vsvmvl __builtin_ve_vl_vand_vsvmvl +#define _vel_pvandlo_vvvl __builtin_ve_vl_pvandlo_vvvl +#define _vel_pvandlo_vvvvl __builtin_ve_vl_pvandlo_vvvvl +#define _vel_pvandlo_vsvl __builtin_ve_vl_pvandlo_vsvl +#define _vel_pvandlo_vsvvl __builtin_ve_vl_pvandlo_vsvvl +#define _vel_pvandlo_vvvMvl 
__builtin_ve_vl_pvandlo_vvvMvl +#define _vel_pvandlo_vsvMvl __builtin_ve_vl_pvandlo_vsvMvl +#define _vel_pvandup_vvvl __builtin_ve_vl_pvandup_vvvl +#define _vel_pvandup_vvvvl __builtin_ve_vl_pvandup_vvvvl +#define _vel_pvandup_vsvl __builtin_ve_vl_pvandup_vsvl +#define _vel_pvandup_vsvvl __builtin_ve_vl_pvandup_vsvvl +#define _vel_pvandup_vvvMvl __builtin_ve_vl_pvandup_vvvMvl +#define _vel_pvandup_vsvMvl __builtin_ve_vl_pvandup_vsvMvl +#define _vel_pvand_vvvl __builtin_ve_vl_pvand_vvvl +#define _vel_pvand_vvvvl __builtin_ve_vl_pvand_vvvvl +#define _vel_pvand_vsvl __builtin_ve_vl_pvand_vsvl +#define _vel_pvand_vsvvl __builtin_ve_vl_pvand_vsvvl +#define _vel_pvand_vvvMvl __builtin_ve_vl_pvand_vvvMvl +#define _vel_pvand_vsvMvl __builtin_ve_vl_pvand_vsvMvl +#define _vel_vor_vvvl __builtin_ve_vl_vor_vvvl +#define _vel_vor_vvvvl __builtin_ve_vl_vor_vvvvl +#define _vel_vor_vsvl __builtin_ve_vl_vor_vsvl +#define _vel_vor_vsvvl __builtin_ve_vl_vor_vsvvl +#define _vel_vor_vvvmvl __builtin_ve_vl_vor_vvvmvl +#define _vel_vor_vsvmvl __builtin_ve_vl_vor_vsvmvl +#define _vel_pvorlo_vvvl __builtin_ve_vl_pvorlo_vvvl +#define _vel_pvorlo_vvvvl __builtin_ve_vl_pvorlo_vvvvl +#define _vel_pvorlo_vsvl __builtin_ve_vl_pvorlo_vsvl +#define _vel_pvorlo_vsvvl __builtin_ve_vl_pvorlo_vsvvl +#define _vel_pvorlo_vvvMvl __builtin_ve_vl_pvorlo_vvvMvl +#define _vel_pvorlo_vsvMvl __builtin_ve_vl_pvorlo_vsvMvl +#define _vel_pvorup_vvvl __builtin_ve_vl_pvorup_vvvl +#define _vel_pvorup_vvvvl __builtin_ve_vl_pvorup_vvvvl +#define _vel_pvorup_vsvl __builtin_ve_vl_pvorup_vsvl +#define _vel_pvorup_vsvvl __builtin_ve_vl_pvorup_vsvvl +#define _vel_pvorup_vvvMvl __builtin_ve_vl_pvorup_vvvMvl +#define _vel_pvorup_vsvMvl __builtin_ve_vl_pvorup_vsvMvl +#define _vel_pvor_vvvl __builtin_ve_vl_pvor_vvvl +#define _vel_pvor_vvvvl __builtin_ve_vl_pvor_vvvvl +#define _vel_pvor_vsvl __builtin_ve_vl_pvor_vsvl +#define _vel_pvor_vsvvl __builtin_ve_vl_pvor_vsvvl +#define _vel_pvor_vvvMvl __builtin_ve_vl_pvor_vvvMvl +#define _vel_pvor_vsvMvl __builtin_ve_vl_pvor_vsvMvl +#define _vel_vxor_vvvl __builtin_ve_vl_vxor_vvvl +#define _vel_vxor_vvvvl __builtin_ve_vl_vxor_vvvvl +#define _vel_vxor_vsvl __builtin_ve_vl_vxor_vsvl +#define _vel_vxor_vsvvl __builtin_ve_vl_vxor_vsvvl +#define _vel_vxor_vvvmvl __builtin_ve_vl_vxor_vvvmvl +#define _vel_vxor_vsvmvl __builtin_ve_vl_vxor_vsvmvl +#define _vel_pvxorlo_vvvl __builtin_ve_vl_pvxorlo_vvvl +#define _vel_pvxorlo_vvvvl __builtin_ve_vl_pvxorlo_vvvvl +#define _vel_pvxorlo_vsvl __builtin_ve_vl_pvxorlo_vsvl +#define _vel_pvxorlo_vsvvl __builtin_ve_vl_pvxorlo_vsvvl +#define _vel_pvxorlo_vvvMvl __builtin_ve_vl_pvxorlo_vvvMvl +#define _vel_pvxorlo_vsvMvl __builtin_ve_vl_pvxorlo_vsvMvl +#define _vel_pvxorup_vvvl __builtin_ve_vl_pvxorup_vvvl +#define _vel_pvxorup_vvvvl __builtin_ve_vl_pvxorup_vvvvl +#define _vel_pvxorup_vsvl __builtin_ve_vl_pvxorup_vsvl +#define _vel_pvxorup_vsvvl __builtin_ve_vl_pvxorup_vsvvl +#define _vel_pvxorup_vvvMvl __builtin_ve_vl_pvxorup_vvvMvl +#define _vel_pvxorup_vsvMvl __builtin_ve_vl_pvxorup_vsvMvl +#define _vel_pvxor_vvvl __builtin_ve_vl_pvxor_vvvl +#define _vel_pvxor_vvvvl __builtin_ve_vl_pvxor_vvvvl +#define _vel_pvxor_vsvl __builtin_ve_vl_pvxor_vsvl +#define _vel_pvxor_vsvvl __builtin_ve_vl_pvxor_vsvvl +#define _vel_pvxor_vvvMvl __builtin_ve_vl_pvxor_vvvMvl +#define _vel_pvxor_vsvMvl __builtin_ve_vl_pvxor_vsvMvl +#define _vel_veqv_vvvl __builtin_ve_vl_veqv_vvvl +#define _vel_veqv_vvvvl __builtin_ve_vl_veqv_vvvvl +#define _vel_veqv_vsvl __builtin_ve_vl_veqv_vsvl +#define 
_vel_veqv_vsvvl __builtin_ve_vl_veqv_vsvvl +#define _vel_veqv_vvvmvl __builtin_ve_vl_veqv_vvvmvl +#define _vel_veqv_vsvmvl __builtin_ve_vl_veqv_vsvmvl +#define _vel_pveqvlo_vvvl __builtin_ve_vl_pveqvlo_vvvl +#define _vel_pveqvlo_vvvvl __builtin_ve_vl_pveqvlo_vvvvl +#define _vel_pveqvlo_vsvl __builtin_ve_vl_pveqvlo_vsvl +#define _vel_pveqvlo_vsvvl __builtin_ve_vl_pveqvlo_vsvvl +#define _vel_pveqvlo_vvvMvl __builtin_ve_vl_pveqvlo_vvvMvl +#define _vel_pveqvlo_vsvMvl __builtin_ve_vl_pveqvlo_vsvMvl +#define _vel_pveqvup_vvvl __builtin_ve_vl_pveqvup_vvvl +#define _vel_pveqvup_vvvvl __builtin_ve_vl_pveqvup_vvvvl +#define _vel_pveqvup_vsvl __builtin_ve_vl_pveqvup_vsvl +#define _vel_pveqvup_vsvvl __builtin_ve_vl_pveqvup_vsvvl +#define _vel_pveqvup_vvvMvl __builtin_ve_vl_pveqvup_vvvMvl +#define _vel_pveqvup_vsvMvl __builtin_ve_vl_pveqvup_vsvMvl +#define _vel_pveqv_vvvl __builtin_ve_vl_pveqv_vvvl +#define _vel_pveqv_vvvvl __builtin_ve_vl_pveqv_vvvvl +#define _vel_pveqv_vsvl __builtin_ve_vl_pveqv_vsvl +#define _vel_pveqv_vsvvl __builtin_ve_vl_pveqv_vsvvl +#define _vel_pveqv_vvvMvl __builtin_ve_vl_pveqv_vvvMvl +#define _vel_pveqv_vsvMvl __builtin_ve_vl_pveqv_vsvMvl +#define _vel_vseq_vl __builtin_ve_vl_vseq_vl +#define _vel_vseq_vvl __builtin_ve_vl_vseq_vvl +#define _vel_pvseqlo_vl __builtin_ve_vl_pvseqlo_vl +#define _vel_pvseqlo_vvl __builtin_ve_vl_pvseqlo_vvl +#define _vel_pvsequp_vl __builtin_ve_vl_pvsequp_vl +#define _vel_pvsequp_vvl __builtin_ve_vl_pvsequp_vvl +#define _vel_pvseq_vl __builtin_ve_vl_pvseq_vl +#define _vel_pvseq_vvl __builtin_ve_vl_pvseq_vvl +#define _vel_vsll_vvvl __builtin_ve_vl_vsll_vvvl +#define _vel_vsll_vvvvl __builtin_ve_vl_vsll_vvvvl +#define _vel_vsll_vvsl __builtin_ve_vl_vsll_vvsl +#define _vel_vsll_vvsvl __builtin_ve_vl_vsll_vvsvl +#define _vel_vsll_vvvmvl __builtin_ve_vl_vsll_vvvmvl +#define _vel_vsll_vvsmvl __builtin_ve_vl_vsll_vvsmvl +#define _vel_pvslllo_vvvl __builtin_ve_vl_pvslllo_vvvl +#define _vel_pvslllo_vvvvl __builtin_ve_vl_pvslllo_vvvvl +#define _vel_pvslllo_vvsl __builtin_ve_vl_pvslllo_vvsl +#define _vel_pvslllo_vvsvl __builtin_ve_vl_pvslllo_vvsvl +#define _vel_pvslllo_vvvMvl __builtin_ve_vl_pvslllo_vvvMvl +#define _vel_pvslllo_vvsMvl __builtin_ve_vl_pvslllo_vvsMvl +#define _vel_pvsllup_vvvl __builtin_ve_vl_pvsllup_vvvl +#define _vel_pvsllup_vvvvl __builtin_ve_vl_pvsllup_vvvvl +#define _vel_pvsllup_vvsl __builtin_ve_vl_pvsllup_vvsl +#define _vel_pvsllup_vvsvl __builtin_ve_vl_pvsllup_vvsvl +#define _vel_pvsllup_vvvMvl __builtin_ve_vl_pvsllup_vvvMvl +#define _vel_pvsllup_vvsMvl __builtin_ve_vl_pvsllup_vvsMvl +#define _vel_pvsll_vvvl __builtin_ve_vl_pvsll_vvvl +#define _vel_pvsll_vvvvl __builtin_ve_vl_pvsll_vvvvl +#define _vel_pvsll_vvsl __builtin_ve_vl_pvsll_vvsl +#define _vel_pvsll_vvsvl __builtin_ve_vl_pvsll_vvsvl +#define _vel_pvsll_vvvMvl __builtin_ve_vl_pvsll_vvvMvl +#define _vel_pvsll_vvsMvl __builtin_ve_vl_pvsll_vvsMvl +#define _vel_vsrl_vvvl __builtin_ve_vl_vsrl_vvvl +#define _vel_vsrl_vvvvl __builtin_ve_vl_vsrl_vvvvl +#define _vel_vsrl_vvsl __builtin_ve_vl_vsrl_vvsl +#define _vel_vsrl_vvsvl __builtin_ve_vl_vsrl_vvsvl +#define _vel_vsrl_vvvmvl __builtin_ve_vl_vsrl_vvvmvl +#define _vel_vsrl_vvsmvl __builtin_ve_vl_vsrl_vvsmvl +#define _vel_pvsrllo_vvvl __builtin_ve_vl_pvsrllo_vvvl +#define _vel_pvsrllo_vvvvl __builtin_ve_vl_pvsrllo_vvvvl +#define _vel_pvsrllo_vvsl __builtin_ve_vl_pvsrllo_vvsl +#define _vel_pvsrllo_vvsvl __builtin_ve_vl_pvsrllo_vvsvl +#define _vel_pvsrllo_vvvMvl __builtin_ve_vl_pvsrllo_vvvMvl +#define _vel_pvsrllo_vvsMvl 
__builtin_ve_vl_pvsrllo_vvsMvl +#define _vel_pvsrlup_vvvl __builtin_ve_vl_pvsrlup_vvvl +#define _vel_pvsrlup_vvvvl __builtin_ve_vl_pvsrlup_vvvvl +#define _vel_pvsrlup_vvsl __builtin_ve_vl_pvsrlup_vvsl +#define _vel_pvsrlup_vvsvl __builtin_ve_vl_pvsrlup_vvsvl +#define _vel_pvsrlup_vvvMvl __builtin_ve_vl_pvsrlup_vvvMvl +#define _vel_pvsrlup_vvsMvl __builtin_ve_vl_pvsrlup_vvsMvl +#define _vel_pvsrl_vvvl __builtin_ve_vl_pvsrl_vvvl +#define _vel_pvsrl_vvvvl __builtin_ve_vl_pvsrl_vvvvl +#define _vel_pvsrl_vvsl __builtin_ve_vl_pvsrl_vvsl +#define _vel_pvsrl_vvsvl __builtin_ve_vl_pvsrl_vvsvl +#define _vel_pvsrl_vvvMvl __builtin_ve_vl_pvsrl_vvvMvl +#define _vel_pvsrl_vvsMvl __builtin_ve_vl_pvsrl_vvsMvl +#define _vel_vslaw_vvvl __builtin_ve_vl_vslaw_vvvl +#define _vel_vslaw_vvvvl __builtin_ve_vl_vslaw_vvvvl +#define _vel_vslaw_vvsl __builtin_ve_vl_vslaw_vvsl +#define _vel_vslaw_vvsvl __builtin_ve_vl_vslaw_vvsvl +#define _vel_vslaw_vvvmvl __builtin_ve_vl_vslaw_vvvmvl +#define _vel_vslaw_vvsmvl __builtin_ve_vl_vslaw_vvsmvl +#define _vel_pvslalo_vvvl __builtin_ve_vl_pvslalo_vvvl +#define _vel_pvslalo_vvvvl __builtin_ve_vl_pvslalo_vvvvl +#define _vel_pvslalo_vvsl __builtin_ve_vl_pvslalo_vvsl +#define _vel_pvslalo_vvsvl __builtin_ve_vl_pvslalo_vvsvl +#define _vel_pvslalo_vvvMvl __builtin_ve_vl_pvslalo_vvvMvl +#define _vel_pvslalo_vvsMvl __builtin_ve_vl_pvslalo_vvsMvl +#define _vel_pvslaup_vvvl __builtin_ve_vl_pvslaup_vvvl +#define _vel_pvslaup_vvvvl __builtin_ve_vl_pvslaup_vvvvl +#define _vel_pvslaup_vvsl __builtin_ve_vl_pvslaup_vvsl +#define _vel_pvslaup_vvsvl __builtin_ve_vl_pvslaup_vvsvl +#define _vel_pvslaup_vvvMvl __builtin_ve_vl_pvslaup_vvvMvl +#define _vel_pvslaup_vvsMvl __builtin_ve_vl_pvslaup_vvsMvl +#define _vel_pvsla_vvvl __builtin_ve_vl_pvsla_vvvl +#define _vel_pvsla_vvvvl __builtin_ve_vl_pvsla_vvvvl +#define _vel_pvsla_vvsl __builtin_ve_vl_pvsla_vvsl +#define _vel_pvsla_vvsvl __builtin_ve_vl_pvsla_vvsvl +#define _vel_pvsla_vvvMvl __builtin_ve_vl_pvsla_vvvMvl +#define _vel_pvsla_vvsMvl __builtin_ve_vl_pvsla_vvsMvl +#define _vel_vslal_vvvl __builtin_ve_vl_vslal_vvvl +#define _vel_vslal_vvvvl __builtin_ve_vl_vslal_vvvvl +#define _vel_vslal_vvsl __builtin_ve_vl_vslal_vvsl +#define _vel_vslal_vvsvl __builtin_ve_vl_vslal_vvsvl +#define _vel_vslal_vvvmvl __builtin_ve_vl_vslal_vvvmvl +#define _vel_vslal_vvsmvl __builtin_ve_vl_vslal_vvsmvl +#define _vel_vsraw_vvvl __builtin_ve_vl_vsraw_vvvl +#define _vel_vsraw_vvvvl __builtin_ve_vl_vsraw_vvvvl +#define _vel_vsraw_vvsl __builtin_ve_vl_vsraw_vvsl +#define _vel_vsraw_vvsvl __builtin_ve_vl_vsraw_vvsvl +#define _vel_vsraw_vvvmvl __builtin_ve_vl_vsraw_vvvmvl +#define _vel_vsraw_vvsmvl __builtin_ve_vl_vsraw_vvsmvl +#define _vel_pvsralo_vvvl __builtin_ve_vl_pvsralo_vvvl +#define _vel_pvsralo_vvvvl __builtin_ve_vl_pvsralo_vvvvl +#define _vel_pvsralo_vvsl __builtin_ve_vl_pvsralo_vvsl +#define _vel_pvsralo_vvsvl __builtin_ve_vl_pvsralo_vvsvl +#define _vel_pvsralo_vvvMvl __builtin_ve_vl_pvsralo_vvvMvl +#define _vel_pvsralo_vvsMvl __builtin_ve_vl_pvsralo_vvsMvl +#define _vel_pvsraup_vvvl __builtin_ve_vl_pvsraup_vvvl +#define _vel_pvsraup_vvvvl __builtin_ve_vl_pvsraup_vvvvl +#define _vel_pvsraup_vvsl __builtin_ve_vl_pvsraup_vvsl +#define _vel_pvsraup_vvsvl __builtin_ve_vl_pvsraup_vvsvl +#define _vel_pvsraup_vvvMvl __builtin_ve_vl_pvsraup_vvvMvl +#define _vel_pvsraup_vvsMvl __builtin_ve_vl_pvsraup_vvsMvl +#define _vel_pvsra_vvvl __builtin_ve_vl_pvsra_vvvl +#define _vel_pvsra_vvvvl __builtin_ve_vl_pvsra_vvvvl +#define _vel_pvsra_vvsl __builtin_ve_vl_pvsra_vvsl 
+#define _vel_pvsra_vvsvl __builtin_ve_vl_pvsra_vvsvl +#define _vel_pvsra_vvvMvl __builtin_ve_vl_pvsra_vvvMvl +#define _vel_pvsra_vvsMvl __builtin_ve_vl_pvsra_vvsMvl +#define _vel_vsral_vvvl __builtin_ve_vl_vsral_vvvl +#define _vel_vsral_vvvvl __builtin_ve_vl_vsral_vvvvl +#define _vel_vsral_vvsl __builtin_ve_vl_vsral_vvsl +#define _vel_vsral_vvsvl __builtin_ve_vl_vsral_vvsvl +#define _vel_vsral_vvvmvl __builtin_ve_vl_vsral_vvvmvl +#define _vel_vsral_vvsmvl __builtin_ve_vl_vsral_vvsmvl +#define _vel_vsfa_vvssl __builtin_ve_vl_vsfa_vvssl +#define _vel_vsfa_vvssvl __builtin_ve_vl_vsfa_vvssvl +#define _vel_vsfa_vvssmvl __builtin_ve_vl_vsfa_vvssmvl +#define _vel_vfaddd_vvvl __builtin_ve_vl_vfaddd_vvvl +#define _vel_vfaddd_vvvvl __builtin_ve_vl_vfaddd_vvvvl +#define _vel_vfaddd_vsvl __builtin_ve_vl_vfaddd_vsvl +#define _vel_vfaddd_vsvvl __builtin_ve_vl_vfaddd_vsvvl +#define _vel_vfaddd_vvvmvl __builtin_ve_vl_vfaddd_vvvmvl +#define _vel_vfaddd_vsvmvl __builtin_ve_vl_vfaddd_vsvmvl +#define _vel_vfadds_vvvl __builtin_ve_vl_vfadds_vvvl +#define _vel_vfadds_vvvvl __builtin_ve_vl_vfadds_vvvvl +#define _vel_vfadds_vsvl __builtin_ve_vl_vfadds_vsvl +#define _vel_vfadds_vsvvl __builtin_ve_vl_vfadds_vsvvl +#define _vel_vfadds_vvvmvl __builtin_ve_vl_vfadds_vvvmvl +#define _vel_vfadds_vsvmvl __builtin_ve_vl_vfadds_vsvmvl +#define _vel_pvfadd_vvvl __builtin_ve_vl_pvfadd_vvvl +#define _vel_pvfadd_vvvvl __builtin_ve_vl_pvfadd_vvvvl +#define _vel_pvfadd_vsvl __builtin_ve_vl_pvfadd_vsvl +#define _vel_pvfadd_vsvvl __builtin_ve_vl_pvfadd_vsvvl +#define _vel_pvfadd_vvvMvl __builtin_ve_vl_pvfadd_vvvMvl +#define _vel_pvfadd_vsvMvl __builtin_ve_vl_pvfadd_vsvMvl +#define _vel_vfsubd_vvvl __builtin_ve_vl_vfsubd_vvvl +#define _vel_vfsubd_vvvvl __builtin_ve_vl_vfsubd_vvvvl +#define _vel_vfsubd_vsvl __builtin_ve_vl_vfsubd_vsvl +#define _vel_vfsubd_vsvvl __builtin_ve_vl_vfsubd_vsvvl +#define _vel_vfsubd_vvvmvl __builtin_ve_vl_vfsubd_vvvmvl +#define _vel_vfsubd_vsvmvl __builtin_ve_vl_vfsubd_vsvmvl +#define _vel_vfsubs_vvvl __builtin_ve_vl_vfsubs_vvvl +#define _vel_vfsubs_vvvvl __builtin_ve_vl_vfsubs_vvvvl +#define _vel_vfsubs_vsvl __builtin_ve_vl_vfsubs_vsvl +#define _vel_vfsubs_vsvvl __builtin_ve_vl_vfsubs_vsvvl +#define _vel_vfsubs_vvvmvl __builtin_ve_vl_vfsubs_vvvmvl +#define _vel_vfsubs_vsvmvl __builtin_ve_vl_vfsubs_vsvmvl +#define _vel_pvfsub_vvvl __builtin_ve_vl_pvfsub_vvvl +#define _vel_pvfsub_vvvvl __builtin_ve_vl_pvfsub_vvvvl +#define _vel_pvfsub_vsvl __builtin_ve_vl_pvfsub_vsvl +#define _vel_pvfsub_vsvvl __builtin_ve_vl_pvfsub_vsvvl +#define _vel_pvfsub_vvvMvl __builtin_ve_vl_pvfsub_vvvMvl +#define _vel_pvfsub_vsvMvl __builtin_ve_vl_pvfsub_vsvMvl +#define _vel_vfmuld_vvvl __builtin_ve_vl_vfmuld_vvvl +#define _vel_vfmuld_vvvvl __builtin_ve_vl_vfmuld_vvvvl +#define _vel_vfmuld_vsvl __builtin_ve_vl_vfmuld_vsvl +#define _vel_vfmuld_vsvvl __builtin_ve_vl_vfmuld_vsvvl +#define _vel_vfmuld_vvvmvl __builtin_ve_vl_vfmuld_vvvmvl +#define _vel_vfmuld_vsvmvl __builtin_ve_vl_vfmuld_vsvmvl +#define _vel_vfmuls_vvvl __builtin_ve_vl_vfmuls_vvvl +#define _vel_vfmuls_vvvvl __builtin_ve_vl_vfmuls_vvvvl +#define _vel_vfmuls_vsvl __builtin_ve_vl_vfmuls_vsvl +#define _vel_vfmuls_vsvvl __builtin_ve_vl_vfmuls_vsvvl +#define _vel_vfmuls_vvvmvl __builtin_ve_vl_vfmuls_vvvmvl +#define _vel_vfmuls_vsvmvl __builtin_ve_vl_vfmuls_vsvmvl +#define _vel_pvfmul_vvvl __builtin_ve_vl_pvfmul_vvvl +#define _vel_pvfmul_vvvvl __builtin_ve_vl_pvfmul_vvvvl +#define _vel_pvfmul_vsvl __builtin_ve_vl_pvfmul_vsvl +#define _vel_pvfmul_vsvvl 
__builtin_ve_vl_pvfmul_vsvvl +#define _vel_pvfmul_vvvMvl __builtin_ve_vl_pvfmul_vvvMvl +#define _vel_pvfmul_vsvMvl __builtin_ve_vl_pvfmul_vsvMvl +#define _vel_vfdivd_vvvl __builtin_ve_vl_vfdivd_vvvl +#define _vel_vfdivd_vvvvl __builtin_ve_vl_vfdivd_vvvvl +#define _vel_vfdivd_vsvl __builtin_ve_vl_vfdivd_vsvl +#define _vel_vfdivd_vsvvl __builtin_ve_vl_vfdivd_vsvvl +#define _vel_vfdivd_vvvmvl __builtin_ve_vl_vfdivd_vvvmvl +#define _vel_vfdivd_vsvmvl __builtin_ve_vl_vfdivd_vsvmvl +#define _vel_vfdivs_vvvl __builtin_ve_vl_vfdivs_vvvl +#define _vel_vfdivs_vvvvl __builtin_ve_vl_vfdivs_vvvvl +#define _vel_vfdivs_vsvl __builtin_ve_vl_vfdivs_vsvl +#define _vel_vfdivs_vsvvl __builtin_ve_vl_vfdivs_vsvvl +#define _vel_vfdivs_vvvmvl __builtin_ve_vl_vfdivs_vvvmvl +#define _vel_vfdivs_vsvmvl __builtin_ve_vl_vfdivs_vsvmvl +#define _vel_vfsqrtd_vvl __builtin_ve_vl_vfsqrtd_vvl +#define _vel_vfsqrtd_vvvl __builtin_ve_vl_vfsqrtd_vvvl +#define _vel_vfsqrts_vvl __builtin_ve_vl_vfsqrts_vvl +#define _vel_vfsqrts_vvvl __builtin_ve_vl_vfsqrts_vvvl +#define _vel_vfcmpd_vvvl __builtin_ve_vl_vfcmpd_vvvl +#define _vel_vfcmpd_vvvvl __builtin_ve_vl_vfcmpd_vvvvl +#define _vel_vfcmpd_vsvl __builtin_ve_vl_vfcmpd_vsvl +#define _vel_vfcmpd_vsvvl __builtin_ve_vl_vfcmpd_vsvvl +#define _vel_vfcmpd_vvvmvl __builtin_ve_vl_vfcmpd_vvvmvl +#define _vel_vfcmpd_vsvmvl __builtin_ve_vl_vfcmpd_vsvmvl +#define _vel_vfcmps_vvvl __builtin_ve_vl_vfcmps_vvvl +#define _vel_vfcmps_vvvvl __builtin_ve_vl_vfcmps_vvvvl +#define _vel_vfcmps_vsvl __builtin_ve_vl_vfcmps_vsvl +#define _vel_vfcmps_vsvvl __builtin_ve_vl_vfcmps_vsvvl +#define _vel_vfcmps_vvvmvl __builtin_ve_vl_vfcmps_vvvmvl +#define _vel_vfcmps_vsvmvl __builtin_ve_vl_vfcmps_vsvmvl +#define _vel_pvfcmp_vvvl __builtin_ve_vl_pvfcmp_vvvl +#define _vel_pvfcmp_vvvvl __builtin_ve_vl_pvfcmp_vvvvl +#define _vel_pvfcmp_vsvl __builtin_ve_vl_pvfcmp_vsvl +#define _vel_pvfcmp_vsvvl __builtin_ve_vl_pvfcmp_vsvvl +#define _vel_pvfcmp_vvvMvl __builtin_ve_vl_pvfcmp_vvvMvl +#define _vel_pvfcmp_vsvMvl __builtin_ve_vl_pvfcmp_vsvMvl +#define _vel_vfmaxd_vvvl __builtin_ve_vl_vfmaxd_vvvl +#define _vel_vfmaxd_vvvvl __builtin_ve_vl_vfmaxd_vvvvl +#define _vel_vfmaxd_vsvl __builtin_ve_vl_vfmaxd_vsvl +#define _vel_vfmaxd_vsvvl __builtin_ve_vl_vfmaxd_vsvvl +#define _vel_vfmaxd_vvvmvl __builtin_ve_vl_vfmaxd_vvvmvl +#define _vel_vfmaxd_vsvmvl __builtin_ve_vl_vfmaxd_vsvmvl +#define _vel_vfmaxs_vvvl __builtin_ve_vl_vfmaxs_vvvl +#define _vel_vfmaxs_vvvvl __builtin_ve_vl_vfmaxs_vvvvl +#define _vel_vfmaxs_vsvl __builtin_ve_vl_vfmaxs_vsvl +#define _vel_vfmaxs_vsvvl __builtin_ve_vl_vfmaxs_vsvvl +#define _vel_vfmaxs_vvvmvl __builtin_ve_vl_vfmaxs_vvvmvl +#define _vel_vfmaxs_vsvmvl __builtin_ve_vl_vfmaxs_vsvmvl +#define _vel_pvfmax_vvvl __builtin_ve_vl_pvfmax_vvvl +#define _vel_pvfmax_vvvvl __builtin_ve_vl_pvfmax_vvvvl +#define _vel_pvfmax_vsvl __builtin_ve_vl_pvfmax_vsvl +#define _vel_pvfmax_vsvvl __builtin_ve_vl_pvfmax_vsvvl +#define _vel_pvfmax_vvvMvl __builtin_ve_vl_pvfmax_vvvMvl +#define _vel_pvfmax_vsvMvl __builtin_ve_vl_pvfmax_vsvMvl +#define _vel_vfmind_vvvl __builtin_ve_vl_vfmind_vvvl +#define _vel_vfmind_vvvvl __builtin_ve_vl_vfmind_vvvvl +#define _vel_vfmind_vsvl __builtin_ve_vl_vfmind_vsvl +#define _vel_vfmind_vsvvl __builtin_ve_vl_vfmind_vsvvl +#define _vel_vfmind_vvvmvl __builtin_ve_vl_vfmind_vvvmvl +#define _vel_vfmind_vsvmvl __builtin_ve_vl_vfmind_vsvmvl +#define _vel_vfmins_vvvl __builtin_ve_vl_vfmins_vvvl +#define _vel_vfmins_vvvvl __builtin_ve_vl_vfmins_vvvvl +#define _vel_vfmins_vsvl __builtin_ve_vl_vfmins_vsvl 
+#define _vel_vfmins_vsvvl __builtin_ve_vl_vfmins_vsvvl +#define _vel_vfmins_vvvmvl __builtin_ve_vl_vfmins_vvvmvl +#define _vel_vfmins_vsvmvl __builtin_ve_vl_vfmins_vsvmvl +#define _vel_pvfmin_vvvl __builtin_ve_vl_pvfmin_vvvl +#define _vel_pvfmin_vvvvl __builtin_ve_vl_pvfmin_vvvvl +#define _vel_pvfmin_vsvl __builtin_ve_vl_pvfmin_vsvl +#define _vel_pvfmin_vsvvl __builtin_ve_vl_pvfmin_vsvvl +#define _vel_pvfmin_vvvMvl __builtin_ve_vl_pvfmin_vvvMvl +#define _vel_pvfmin_vsvMvl __builtin_ve_vl_pvfmin_vsvMvl +#define _vel_vfmadd_vvvvl __builtin_ve_vl_vfmadd_vvvvl +#define _vel_vfmadd_vvvvvl __builtin_ve_vl_vfmadd_vvvvvl +#define _vel_vfmadd_vsvvl __builtin_ve_vl_vfmadd_vsvvl +#define _vel_vfmadd_vsvvvl __builtin_ve_vl_vfmadd_vsvvvl +#define _vel_vfmadd_vvsvl __builtin_ve_vl_vfmadd_vvsvl +#define _vel_vfmadd_vvsvvl __builtin_ve_vl_vfmadd_vvsvvl +#define _vel_vfmadd_vvvvmvl __builtin_ve_vl_vfmadd_vvvvmvl +#define _vel_vfmadd_vsvvmvl __builtin_ve_vl_vfmadd_vsvvmvl +#define _vel_vfmadd_vvsvmvl __builtin_ve_vl_vfmadd_vvsvmvl +#define _vel_vfmads_vvvvl __builtin_ve_vl_vfmads_vvvvl +#define _vel_vfmads_vvvvvl __builtin_ve_vl_vfmads_vvvvvl +#define _vel_vfmads_vsvvl __builtin_ve_vl_vfmads_vsvvl +#define _vel_vfmads_vsvvvl __builtin_ve_vl_vfmads_vsvvvl +#define _vel_vfmads_vvsvl __builtin_ve_vl_vfmads_vvsvl +#define _vel_vfmads_vvsvvl __builtin_ve_vl_vfmads_vvsvvl +#define _vel_vfmads_vvvvmvl __builtin_ve_vl_vfmads_vvvvmvl +#define _vel_vfmads_vsvvmvl __builtin_ve_vl_vfmads_vsvvmvl +#define _vel_vfmads_vvsvmvl __builtin_ve_vl_vfmads_vvsvmvl +#define _vel_pvfmad_vvvvl __builtin_ve_vl_pvfmad_vvvvl +#define _vel_pvfmad_vvvvvl __builtin_ve_vl_pvfmad_vvvvvl +#define _vel_pvfmad_vsvvl __builtin_ve_vl_pvfmad_vsvvl +#define _vel_pvfmad_vsvvvl __builtin_ve_vl_pvfmad_vsvvvl +#define _vel_pvfmad_vvsvl __builtin_ve_vl_pvfmad_vvsvl +#define _vel_pvfmad_vvsvvl __builtin_ve_vl_pvfmad_vvsvvl +#define _vel_pvfmad_vvvvMvl __builtin_ve_vl_pvfmad_vvvvMvl +#define _vel_pvfmad_vsvvMvl __builtin_ve_vl_pvfmad_vsvvMvl +#define _vel_pvfmad_vvsvMvl __builtin_ve_vl_pvfmad_vvsvMvl +#define _vel_vfmsbd_vvvvl __builtin_ve_vl_vfmsbd_vvvvl +#define _vel_vfmsbd_vvvvvl __builtin_ve_vl_vfmsbd_vvvvvl +#define _vel_vfmsbd_vsvvl __builtin_ve_vl_vfmsbd_vsvvl +#define _vel_vfmsbd_vsvvvl __builtin_ve_vl_vfmsbd_vsvvvl +#define _vel_vfmsbd_vvsvl __builtin_ve_vl_vfmsbd_vvsvl +#define _vel_vfmsbd_vvsvvl __builtin_ve_vl_vfmsbd_vvsvvl +#define _vel_vfmsbd_vvvvmvl __builtin_ve_vl_vfmsbd_vvvvmvl +#define _vel_vfmsbd_vsvvmvl __builtin_ve_vl_vfmsbd_vsvvmvl +#define _vel_vfmsbd_vvsvmvl __builtin_ve_vl_vfmsbd_vvsvmvl +#define _vel_vfmsbs_vvvvl __builtin_ve_vl_vfmsbs_vvvvl +#define _vel_vfmsbs_vvvvvl __builtin_ve_vl_vfmsbs_vvvvvl +#define _vel_vfmsbs_vsvvl __builtin_ve_vl_vfmsbs_vsvvl +#define _vel_vfmsbs_vsvvvl __builtin_ve_vl_vfmsbs_vsvvvl +#define _vel_vfmsbs_vvsvl __builtin_ve_vl_vfmsbs_vvsvl +#define _vel_vfmsbs_vvsvvl __builtin_ve_vl_vfmsbs_vvsvvl +#define _vel_vfmsbs_vvvvmvl __builtin_ve_vl_vfmsbs_vvvvmvl +#define _vel_vfmsbs_vsvvmvl __builtin_ve_vl_vfmsbs_vsvvmvl +#define _vel_vfmsbs_vvsvmvl __builtin_ve_vl_vfmsbs_vvsvmvl +#define _vel_pvfmsb_vvvvl __builtin_ve_vl_pvfmsb_vvvvl +#define _vel_pvfmsb_vvvvvl __builtin_ve_vl_pvfmsb_vvvvvl +#define _vel_pvfmsb_vsvvl __builtin_ve_vl_pvfmsb_vsvvl +#define _vel_pvfmsb_vsvvvl __builtin_ve_vl_pvfmsb_vsvvvl +#define _vel_pvfmsb_vvsvl __builtin_ve_vl_pvfmsb_vvsvl +#define _vel_pvfmsb_vvsvvl __builtin_ve_vl_pvfmsb_vvsvvl +#define _vel_pvfmsb_vvvvMvl __builtin_ve_vl_pvfmsb_vvvvMvl +#define _vel_pvfmsb_vsvvMvl 
__builtin_ve_vl_pvfmsb_vsvvMvl +#define _vel_pvfmsb_vvsvMvl __builtin_ve_vl_pvfmsb_vvsvMvl +#define _vel_vfnmadd_vvvvl __builtin_ve_vl_vfnmadd_vvvvl +#define _vel_vfnmadd_vvvvvl __builtin_ve_vl_vfnmadd_vvvvvl +#define _vel_vfnmadd_vsvvl __builtin_ve_vl_vfnmadd_vsvvl +#define _vel_vfnmadd_vsvvvl __builtin_ve_vl_vfnmadd_vsvvvl +#define _vel_vfnmadd_vvsvl __builtin_ve_vl_vfnmadd_vvsvl +#define _vel_vfnmadd_vvsvvl __builtin_ve_vl_vfnmadd_vvsvvl +#define _vel_vfnmadd_vvvvmvl __builtin_ve_vl_vfnmadd_vvvvmvl +#define _vel_vfnmadd_vsvvmvl __builtin_ve_vl_vfnmadd_vsvvmvl +#define _vel_vfnmadd_vvsvmvl __builtin_ve_vl_vfnmadd_vvsvmvl +#define _vel_vfnmads_vvvvl __builtin_ve_vl_vfnmads_vvvvl +#define _vel_vfnmads_vvvvvl __builtin_ve_vl_vfnmads_vvvvvl +#define _vel_vfnmads_vsvvl __builtin_ve_vl_vfnmads_vsvvl +#define _vel_vfnmads_vsvvvl __builtin_ve_vl_vfnmads_vsvvvl +#define _vel_vfnmads_vvsvl __builtin_ve_vl_vfnmads_vvsvl +#define _vel_vfnmads_vvsvvl __builtin_ve_vl_vfnmads_vvsvvl +#define _vel_vfnmads_vvvvmvl __builtin_ve_vl_vfnmads_vvvvmvl +#define _vel_vfnmads_vsvvmvl __builtin_ve_vl_vfnmads_vsvvmvl +#define _vel_vfnmads_vvsvmvl __builtin_ve_vl_vfnmads_vvsvmvl +#define _vel_pvfnmad_vvvvl __builtin_ve_vl_pvfnmad_vvvvl +#define _vel_pvfnmad_vvvvvl __builtin_ve_vl_pvfnmad_vvvvvl +#define _vel_pvfnmad_vsvvl __builtin_ve_vl_pvfnmad_vsvvl +#define _vel_pvfnmad_vsvvvl __builtin_ve_vl_pvfnmad_vsvvvl +#define _vel_pvfnmad_vvsvl __builtin_ve_vl_pvfnmad_vvsvl +#define _vel_pvfnmad_vvsvvl __builtin_ve_vl_pvfnmad_vvsvvl +#define _vel_pvfnmad_vvvvMvl __builtin_ve_vl_pvfnmad_vvvvMvl +#define _vel_pvfnmad_vsvvMvl __builtin_ve_vl_pvfnmad_vsvvMvl +#define _vel_pvfnmad_vvsvMvl __builtin_ve_vl_pvfnmad_vvsvMvl +#define _vel_vfnmsbd_vvvvl __builtin_ve_vl_vfnmsbd_vvvvl +#define _vel_vfnmsbd_vvvvvl __builtin_ve_vl_vfnmsbd_vvvvvl +#define _vel_vfnmsbd_vsvvl __builtin_ve_vl_vfnmsbd_vsvvl +#define _vel_vfnmsbd_vsvvvl __builtin_ve_vl_vfnmsbd_vsvvvl +#define _vel_vfnmsbd_vvsvl __builtin_ve_vl_vfnmsbd_vvsvl +#define _vel_vfnmsbd_vvsvvl __builtin_ve_vl_vfnmsbd_vvsvvl +#define _vel_vfnmsbd_vvvvmvl __builtin_ve_vl_vfnmsbd_vvvvmvl +#define _vel_vfnmsbd_vsvvmvl __builtin_ve_vl_vfnmsbd_vsvvmvl +#define _vel_vfnmsbd_vvsvmvl __builtin_ve_vl_vfnmsbd_vvsvmvl +#define _vel_vfnmsbs_vvvvl __builtin_ve_vl_vfnmsbs_vvvvl +#define _vel_vfnmsbs_vvvvvl __builtin_ve_vl_vfnmsbs_vvvvvl +#define _vel_vfnmsbs_vsvvl __builtin_ve_vl_vfnmsbs_vsvvl +#define _vel_vfnmsbs_vsvvvl __builtin_ve_vl_vfnmsbs_vsvvvl +#define _vel_vfnmsbs_vvsvl __builtin_ve_vl_vfnmsbs_vvsvl +#define _vel_vfnmsbs_vvsvvl __builtin_ve_vl_vfnmsbs_vvsvvl +#define _vel_vfnmsbs_vvvvmvl __builtin_ve_vl_vfnmsbs_vvvvmvl +#define _vel_vfnmsbs_vsvvmvl __builtin_ve_vl_vfnmsbs_vsvvmvl +#define _vel_vfnmsbs_vvsvmvl __builtin_ve_vl_vfnmsbs_vvsvmvl +#define _vel_pvfnmsb_vvvvl __builtin_ve_vl_pvfnmsb_vvvvl +#define _vel_pvfnmsb_vvvvvl __builtin_ve_vl_pvfnmsb_vvvvvl +#define _vel_pvfnmsb_vsvvl __builtin_ve_vl_pvfnmsb_vsvvl +#define _vel_pvfnmsb_vsvvvl __builtin_ve_vl_pvfnmsb_vsvvvl +#define _vel_pvfnmsb_vvsvl __builtin_ve_vl_pvfnmsb_vvsvl +#define _vel_pvfnmsb_vvsvvl __builtin_ve_vl_pvfnmsb_vvsvvl +#define _vel_pvfnmsb_vvvvMvl __builtin_ve_vl_pvfnmsb_vvvvMvl +#define _vel_pvfnmsb_vsvvMvl __builtin_ve_vl_pvfnmsb_vsvvMvl +#define _vel_pvfnmsb_vvsvMvl __builtin_ve_vl_pvfnmsb_vvsvMvl +#define _vel_vrcpd_vvl __builtin_ve_vl_vrcpd_vvl +#define _vel_vrcpd_vvvl __builtin_ve_vl_vrcpd_vvvl +#define _vel_vrcps_vvl __builtin_ve_vl_vrcps_vvl +#define _vel_vrcps_vvvl __builtin_ve_vl_vrcps_vvvl +#define 
_vel_pvrcp_vvl __builtin_ve_vl_pvrcp_vvl +#define _vel_pvrcp_vvvl __builtin_ve_vl_pvrcp_vvvl +#define _vel_vrsqrtd_vvl __builtin_ve_vl_vrsqrtd_vvl +#define _vel_vrsqrtd_vvvl __builtin_ve_vl_vrsqrtd_vvvl +#define _vel_vrsqrts_vvl __builtin_ve_vl_vrsqrts_vvl +#define _vel_vrsqrts_vvvl __builtin_ve_vl_vrsqrts_vvvl +#define _vel_pvrsqrt_vvl __builtin_ve_vl_pvrsqrt_vvl +#define _vel_pvrsqrt_vvvl __builtin_ve_vl_pvrsqrt_vvvl +#define _vel_vrsqrtdnex_vvl __builtin_ve_vl_vrsqrtdnex_vvl +#define _vel_vrsqrtdnex_vvvl __builtin_ve_vl_vrsqrtdnex_vvvl +#define _vel_vrsqrtsnex_vvl __builtin_ve_vl_vrsqrtsnex_vvl +#define _vel_vrsqrtsnex_vvvl __builtin_ve_vl_vrsqrtsnex_vvvl +#define _vel_pvrsqrtnex_vvl __builtin_ve_vl_pvrsqrtnex_vvl +#define _vel_pvrsqrtnex_vvvl __builtin_ve_vl_pvrsqrtnex_vvvl +#define _vel_vcvtwdsx_vvl __builtin_ve_vl_vcvtwdsx_vvl +#define _vel_vcvtwdsx_vvvl __builtin_ve_vl_vcvtwdsx_vvvl +#define _vel_vcvtwdsx_vvmvl __builtin_ve_vl_vcvtwdsx_vvmvl +#define _vel_vcvtwdsxrz_vvl __builtin_ve_vl_vcvtwdsxrz_vvl +#define _vel_vcvtwdsxrz_vvvl __builtin_ve_vl_vcvtwdsxrz_vvvl +#define _vel_vcvtwdsxrz_vvmvl __builtin_ve_vl_vcvtwdsxrz_vvmvl +#define _vel_vcvtwdzx_vvl __builtin_ve_vl_vcvtwdzx_vvl +#define _vel_vcvtwdzx_vvvl __builtin_ve_vl_vcvtwdzx_vvvl +#define _vel_vcvtwdzx_vvmvl __builtin_ve_vl_vcvtwdzx_vvmvl +#define _vel_vcvtwdzxrz_vvl __builtin_ve_vl_vcvtwdzxrz_vvl +#define _vel_vcvtwdzxrz_vvvl __builtin_ve_vl_vcvtwdzxrz_vvvl +#define _vel_vcvtwdzxrz_vvmvl __builtin_ve_vl_vcvtwdzxrz_vvmvl +#define _vel_vcvtwssx_vvl __builtin_ve_vl_vcvtwssx_vvl +#define _vel_vcvtwssx_vvvl __builtin_ve_vl_vcvtwssx_vvvl +#define _vel_vcvtwssx_vvmvl __builtin_ve_vl_vcvtwssx_vvmvl +#define _vel_vcvtwssxrz_vvl __builtin_ve_vl_vcvtwssxrz_vvl +#define _vel_vcvtwssxrz_vvvl __builtin_ve_vl_vcvtwssxrz_vvvl +#define _vel_vcvtwssxrz_vvmvl __builtin_ve_vl_vcvtwssxrz_vvmvl +#define _vel_vcvtwszx_vvl __builtin_ve_vl_vcvtwszx_vvl +#define _vel_vcvtwszx_vvvl __builtin_ve_vl_vcvtwszx_vvvl +#define _vel_vcvtwszx_vvmvl __builtin_ve_vl_vcvtwszx_vvmvl +#define _vel_vcvtwszxrz_vvl __builtin_ve_vl_vcvtwszxrz_vvl +#define _vel_vcvtwszxrz_vvvl __builtin_ve_vl_vcvtwszxrz_vvvl +#define _vel_vcvtwszxrz_vvmvl __builtin_ve_vl_vcvtwszxrz_vvmvl +#define _vel_pvcvtws_vvl __builtin_ve_vl_pvcvtws_vvl +#define _vel_pvcvtws_vvvl __builtin_ve_vl_pvcvtws_vvvl +#define _vel_pvcvtws_vvMvl __builtin_ve_vl_pvcvtws_vvMvl +#define _vel_pvcvtwsrz_vvl __builtin_ve_vl_pvcvtwsrz_vvl +#define _vel_pvcvtwsrz_vvvl __builtin_ve_vl_pvcvtwsrz_vvvl +#define _vel_pvcvtwsrz_vvMvl __builtin_ve_vl_pvcvtwsrz_vvMvl +#define _vel_vcvtld_vvl __builtin_ve_vl_vcvtld_vvl +#define _vel_vcvtld_vvvl __builtin_ve_vl_vcvtld_vvvl +#define _vel_vcvtld_vvmvl __builtin_ve_vl_vcvtld_vvmvl +#define _vel_vcvtldrz_vvl __builtin_ve_vl_vcvtldrz_vvl +#define _vel_vcvtldrz_vvvl __builtin_ve_vl_vcvtldrz_vvvl +#define _vel_vcvtldrz_vvmvl __builtin_ve_vl_vcvtldrz_vvmvl +#define _vel_vcvtdw_vvl __builtin_ve_vl_vcvtdw_vvl +#define _vel_vcvtdw_vvvl __builtin_ve_vl_vcvtdw_vvvl +#define _vel_vcvtsw_vvl __builtin_ve_vl_vcvtsw_vvl +#define _vel_vcvtsw_vvvl __builtin_ve_vl_vcvtsw_vvvl +#define _vel_pvcvtsw_vvl __builtin_ve_vl_pvcvtsw_vvl +#define _vel_pvcvtsw_vvvl __builtin_ve_vl_pvcvtsw_vvvl +#define _vel_vcvtdl_vvl __builtin_ve_vl_vcvtdl_vvl +#define _vel_vcvtdl_vvvl __builtin_ve_vl_vcvtdl_vvvl +#define _vel_vcvtds_vvl __builtin_ve_vl_vcvtds_vvl +#define _vel_vcvtds_vvvl __builtin_ve_vl_vcvtds_vvvl +#define _vel_vcvtsd_vvl __builtin_ve_vl_vcvtsd_vvl +#define _vel_vcvtsd_vvvl __builtin_ve_vl_vcvtsd_vvvl 
+#define _vel_vmrg_vvvml __builtin_ve_vl_vmrg_vvvml +#define _vel_vmrg_vvvmvl __builtin_ve_vl_vmrg_vvvmvl +#define _vel_vmrg_vsvml __builtin_ve_vl_vmrg_vsvml +#define _vel_vmrg_vsvmvl __builtin_ve_vl_vmrg_vsvmvl +#define _vel_vmrgw_vvvMl __builtin_ve_vl_vmrgw_vvvMl +#define _vel_vmrgw_vvvMvl __builtin_ve_vl_vmrgw_vvvMvl +#define _vel_vmrgw_vsvMl __builtin_ve_vl_vmrgw_vsvMl +#define _vel_vmrgw_vsvMvl __builtin_ve_vl_vmrgw_vsvMvl +#define _vel_vshf_vvvsl __builtin_ve_vl_vshf_vvvsl +#define _vel_vshf_vvvsvl __builtin_ve_vl_vshf_vvvsvl +#define _vel_vcp_vvmvl __builtin_ve_vl_vcp_vvmvl +#define _vel_vex_vvmvl __builtin_ve_vl_vex_vvmvl +#define _vel_vfmklat_ml __builtin_ve_vl_vfmklat_ml +#define _vel_vfmklaf_ml __builtin_ve_vl_vfmklaf_ml +#define _vel_pvfmkwloat_ml __builtin_ve_vl_pvfmkwloat_ml +#define _vel_pvfmkwupat_ml __builtin_ve_vl_pvfmkwupat_ml +#define _vel_pvfmkwloaf_ml __builtin_ve_vl_pvfmkwloaf_ml +#define _vel_pvfmkwupaf_ml __builtin_ve_vl_pvfmkwupaf_ml +#define _vel_pvfmkat_Ml __builtin_ve_vl_pvfmkat_Ml +#define _vel_pvfmkaf_Ml __builtin_ve_vl_pvfmkaf_Ml +#define _vel_vfmklgt_mvl __builtin_ve_vl_vfmklgt_mvl +#define _vel_vfmklgt_mvml __builtin_ve_vl_vfmklgt_mvml +#define _vel_vfmkllt_mvl __builtin_ve_vl_vfmkllt_mvl +#define _vel_vfmkllt_mvml __builtin_ve_vl_vfmkllt_mvml +#define _vel_vfmklne_mvl __builtin_ve_vl_vfmklne_mvl +#define _vel_vfmklne_mvml __builtin_ve_vl_vfmklne_mvml +#define _vel_vfmkleq_mvl __builtin_ve_vl_vfmkleq_mvl +#define _vel_vfmkleq_mvml __builtin_ve_vl_vfmkleq_mvml +#define _vel_vfmklge_mvl __builtin_ve_vl_vfmklge_mvl +#define _vel_vfmklge_mvml __builtin_ve_vl_vfmklge_mvml +#define _vel_vfmklle_mvl __builtin_ve_vl_vfmklle_mvl +#define _vel_vfmklle_mvml __builtin_ve_vl_vfmklle_mvml +#define _vel_vfmklnum_mvl __builtin_ve_vl_vfmklnum_mvl +#define _vel_vfmklnum_mvml __builtin_ve_vl_vfmklnum_mvml +#define _vel_vfmklnan_mvl __builtin_ve_vl_vfmklnan_mvl +#define _vel_vfmklnan_mvml __builtin_ve_vl_vfmklnan_mvml +#define _vel_vfmklgtnan_mvl __builtin_ve_vl_vfmklgtnan_mvl +#define _vel_vfmklgtnan_mvml __builtin_ve_vl_vfmklgtnan_mvml +#define _vel_vfmklltnan_mvl __builtin_ve_vl_vfmklltnan_mvl +#define _vel_vfmklltnan_mvml __builtin_ve_vl_vfmklltnan_mvml +#define _vel_vfmklnenan_mvl __builtin_ve_vl_vfmklnenan_mvl +#define _vel_vfmklnenan_mvml __builtin_ve_vl_vfmklnenan_mvml +#define _vel_vfmkleqnan_mvl __builtin_ve_vl_vfmkleqnan_mvl +#define _vel_vfmkleqnan_mvml __builtin_ve_vl_vfmkleqnan_mvml +#define _vel_vfmklgenan_mvl __builtin_ve_vl_vfmklgenan_mvl +#define _vel_vfmklgenan_mvml __builtin_ve_vl_vfmklgenan_mvml +#define _vel_vfmkllenan_mvl __builtin_ve_vl_vfmkllenan_mvl +#define _vel_vfmkllenan_mvml __builtin_ve_vl_vfmkllenan_mvml +#define _vel_vfmkwgt_mvl __builtin_ve_vl_vfmkwgt_mvl +#define _vel_vfmkwgt_mvml __builtin_ve_vl_vfmkwgt_mvml +#define _vel_vfmkwlt_mvl __builtin_ve_vl_vfmkwlt_mvl +#define _vel_vfmkwlt_mvml __builtin_ve_vl_vfmkwlt_mvml +#define _vel_vfmkwne_mvl __builtin_ve_vl_vfmkwne_mvl +#define _vel_vfmkwne_mvml __builtin_ve_vl_vfmkwne_mvml +#define _vel_vfmkweq_mvl __builtin_ve_vl_vfmkweq_mvl +#define _vel_vfmkweq_mvml __builtin_ve_vl_vfmkweq_mvml +#define _vel_vfmkwge_mvl __builtin_ve_vl_vfmkwge_mvl +#define _vel_vfmkwge_mvml __builtin_ve_vl_vfmkwge_mvml +#define _vel_vfmkwle_mvl __builtin_ve_vl_vfmkwle_mvl +#define _vel_vfmkwle_mvml __builtin_ve_vl_vfmkwle_mvml +#define _vel_vfmkwnum_mvl __builtin_ve_vl_vfmkwnum_mvl +#define _vel_vfmkwnum_mvml __builtin_ve_vl_vfmkwnum_mvml +#define _vel_vfmkwnan_mvl __builtin_ve_vl_vfmkwnan_mvl +#define 
_vel_vfmkwnan_mvml __builtin_ve_vl_vfmkwnan_mvml +#define _vel_vfmkwgtnan_mvl __builtin_ve_vl_vfmkwgtnan_mvl +#define _vel_vfmkwgtnan_mvml __builtin_ve_vl_vfmkwgtnan_mvml +#define _vel_vfmkwltnan_mvl __builtin_ve_vl_vfmkwltnan_mvl +#define _vel_vfmkwltnan_mvml __builtin_ve_vl_vfmkwltnan_mvml +#define _vel_vfmkwnenan_mvl __builtin_ve_vl_vfmkwnenan_mvl +#define _vel_vfmkwnenan_mvml __builtin_ve_vl_vfmkwnenan_mvml +#define _vel_vfmkweqnan_mvl __builtin_ve_vl_vfmkweqnan_mvl +#define _vel_vfmkweqnan_mvml __builtin_ve_vl_vfmkweqnan_mvml +#define _vel_vfmkwgenan_mvl __builtin_ve_vl_vfmkwgenan_mvl +#define _vel_vfmkwgenan_mvml __builtin_ve_vl_vfmkwgenan_mvml +#define _vel_vfmkwlenan_mvl __builtin_ve_vl_vfmkwlenan_mvl +#define _vel_vfmkwlenan_mvml __builtin_ve_vl_vfmkwlenan_mvml +#define _vel_pvfmkwlogt_mvl __builtin_ve_vl_pvfmkwlogt_mvl +#define _vel_pvfmkwupgt_mvl __builtin_ve_vl_pvfmkwupgt_mvl +#define _vel_pvfmkwlogt_mvml __builtin_ve_vl_pvfmkwlogt_mvml +#define _vel_pvfmkwupgt_mvml __builtin_ve_vl_pvfmkwupgt_mvml +#define _vel_pvfmkwlolt_mvl __builtin_ve_vl_pvfmkwlolt_mvl +#define _vel_pvfmkwuplt_mvl __builtin_ve_vl_pvfmkwuplt_mvl +#define _vel_pvfmkwlolt_mvml __builtin_ve_vl_pvfmkwlolt_mvml +#define _vel_pvfmkwuplt_mvml __builtin_ve_vl_pvfmkwuplt_mvml +#define _vel_pvfmkwlone_mvl __builtin_ve_vl_pvfmkwlone_mvl +#define _vel_pvfmkwupne_mvl __builtin_ve_vl_pvfmkwupne_mvl +#define _vel_pvfmkwlone_mvml __builtin_ve_vl_pvfmkwlone_mvml +#define _vel_pvfmkwupne_mvml __builtin_ve_vl_pvfmkwupne_mvml +#define _vel_pvfmkwloeq_mvl __builtin_ve_vl_pvfmkwloeq_mvl +#define _vel_pvfmkwupeq_mvl __builtin_ve_vl_pvfmkwupeq_mvl +#define _vel_pvfmkwloeq_mvml __builtin_ve_vl_pvfmkwloeq_mvml +#define _vel_pvfmkwupeq_mvml __builtin_ve_vl_pvfmkwupeq_mvml +#define _vel_pvfmkwloge_mvl __builtin_ve_vl_pvfmkwloge_mvl +#define _vel_pvfmkwupge_mvl __builtin_ve_vl_pvfmkwupge_mvl +#define _vel_pvfmkwloge_mvml __builtin_ve_vl_pvfmkwloge_mvml +#define _vel_pvfmkwupge_mvml __builtin_ve_vl_pvfmkwupge_mvml +#define _vel_pvfmkwlole_mvl __builtin_ve_vl_pvfmkwlole_mvl +#define _vel_pvfmkwuple_mvl __builtin_ve_vl_pvfmkwuple_mvl +#define _vel_pvfmkwlole_mvml __builtin_ve_vl_pvfmkwlole_mvml +#define _vel_pvfmkwuple_mvml __builtin_ve_vl_pvfmkwuple_mvml +#define _vel_pvfmkwlonum_mvl __builtin_ve_vl_pvfmkwlonum_mvl +#define _vel_pvfmkwupnum_mvl __builtin_ve_vl_pvfmkwupnum_mvl +#define _vel_pvfmkwlonum_mvml __builtin_ve_vl_pvfmkwlonum_mvml +#define _vel_pvfmkwupnum_mvml __builtin_ve_vl_pvfmkwupnum_mvml +#define _vel_pvfmkwlonan_mvl __builtin_ve_vl_pvfmkwlonan_mvl +#define _vel_pvfmkwupnan_mvl __builtin_ve_vl_pvfmkwupnan_mvl +#define _vel_pvfmkwlonan_mvml __builtin_ve_vl_pvfmkwlonan_mvml +#define _vel_pvfmkwupnan_mvml __builtin_ve_vl_pvfmkwupnan_mvml +#define _vel_pvfmkwlogtnan_mvl __builtin_ve_vl_pvfmkwlogtnan_mvl +#define _vel_pvfmkwupgtnan_mvl __builtin_ve_vl_pvfmkwupgtnan_mvl +#define _vel_pvfmkwlogtnan_mvml __builtin_ve_vl_pvfmkwlogtnan_mvml +#define _vel_pvfmkwupgtnan_mvml __builtin_ve_vl_pvfmkwupgtnan_mvml +#define _vel_pvfmkwloltnan_mvl __builtin_ve_vl_pvfmkwloltnan_mvl +#define _vel_pvfmkwupltnan_mvl __builtin_ve_vl_pvfmkwupltnan_mvl +#define _vel_pvfmkwloltnan_mvml __builtin_ve_vl_pvfmkwloltnan_mvml +#define _vel_pvfmkwupltnan_mvml __builtin_ve_vl_pvfmkwupltnan_mvml +#define _vel_pvfmkwlonenan_mvl __builtin_ve_vl_pvfmkwlonenan_mvl +#define _vel_pvfmkwupnenan_mvl __builtin_ve_vl_pvfmkwupnenan_mvl +#define _vel_pvfmkwlonenan_mvml __builtin_ve_vl_pvfmkwlonenan_mvml +#define _vel_pvfmkwupnenan_mvml __builtin_ve_vl_pvfmkwupnenan_mvml 
+#define _vel_pvfmkwloeqnan_mvl __builtin_ve_vl_pvfmkwloeqnan_mvl +#define _vel_pvfmkwupeqnan_mvl __builtin_ve_vl_pvfmkwupeqnan_mvl +#define _vel_pvfmkwloeqnan_mvml __builtin_ve_vl_pvfmkwloeqnan_mvml +#define _vel_pvfmkwupeqnan_mvml __builtin_ve_vl_pvfmkwupeqnan_mvml +#define _vel_pvfmkwlogenan_mvl __builtin_ve_vl_pvfmkwlogenan_mvl +#define _vel_pvfmkwupgenan_mvl __builtin_ve_vl_pvfmkwupgenan_mvl +#define _vel_pvfmkwlogenan_mvml __builtin_ve_vl_pvfmkwlogenan_mvml +#define _vel_pvfmkwupgenan_mvml __builtin_ve_vl_pvfmkwupgenan_mvml +#define _vel_pvfmkwlolenan_mvl __builtin_ve_vl_pvfmkwlolenan_mvl +#define _vel_pvfmkwuplenan_mvl __builtin_ve_vl_pvfmkwuplenan_mvl +#define _vel_pvfmkwlolenan_mvml __builtin_ve_vl_pvfmkwlolenan_mvml +#define _vel_pvfmkwuplenan_mvml __builtin_ve_vl_pvfmkwuplenan_mvml +#define _vel_pvfmkwgt_Mvl __builtin_ve_vl_pvfmkwgt_Mvl +#define _vel_pvfmkwgt_MvMl __builtin_ve_vl_pvfmkwgt_MvMl +#define _vel_pvfmkwlt_Mvl __builtin_ve_vl_pvfmkwlt_Mvl +#define _vel_pvfmkwlt_MvMl __builtin_ve_vl_pvfmkwlt_MvMl +#define _vel_pvfmkwne_Mvl __builtin_ve_vl_pvfmkwne_Mvl +#define _vel_pvfmkwne_MvMl __builtin_ve_vl_pvfmkwne_MvMl +#define _vel_pvfmkweq_Mvl __builtin_ve_vl_pvfmkweq_Mvl +#define _vel_pvfmkweq_MvMl __builtin_ve_vl_pvfmkweq_MvMl +#define _vel_pvfmkwge_Mvl __builtin_ve_vl_pvfmkwge_Mvl +#define _vel_pvfmkwge_MvMl __builtin_ve_vl_pvfmkwge_MvMl +#define _vel_pvfmkwle_Mvl __builtin_ve_vl_pvfmkwle_Mvl +#define _vel_pvfmkwle_MvMl __builtin_ve_vl_pvfmkwle_MvMl +#define _vel_pvfmkwnum_Mvl __builtin_ve_vl_pvfmkwnum_Mvl +#define _vel_pvfmkwnum_MvMl __builtin_ve_vl_pvfmkwnum_MvMl +#define _vel_pvfmkwnan_Mvl __builtin_ve_vl_pvfmkwnan_Mvl +#define _vel_pvfmkwnan_MvMl __builtin_ve_vl_pvfmkwnan_MvMl +#define _vel_pvfmkwgtnan_Mvl __builtin_ve_vl_pvfmkwgtnan_Mvl +#define _vel_pvfmkwgtnan_MvMl __builtin_ve_vl_pvfmkwgtnan_MvMl +#define _vel_pvfmkwltnan_Mvl __builtin_ve_vl_pvfmkwltnan_Mvl +#define _vel_pvfmkwltnan_MvMl __builtin_ve_vl_pvfmkwltnan_MvMl +#define _vel_pvfmkwnenan_Mvl __builtin_ve_vl_pvfmkwnenan_Mvl +#define _vel_pvfmkwnenan_MvMl __builtin_ve_vl_pvfmkwnenan_MvMl +#define _vel_pvfmkweqnan_Mvl __builtin_ve_vl_pvfmkweqnan_Mvl +#define _vel_pvfmkweqnan_MvMl __builtin_ve_vl_pvfmkweqnan_MvMl +#define _vel_pvfmkwgenan_Mvl __builtin_ve_vl_pvfmkwgenan_Mvl +#define _vel_pvfmkwgenan_MvMl __builtin_ve_vl_pvfmkwgenan_MvMl +#define _vel_pvfmkwlenan_Mvl __builtin_ve_vl_pvfmkwlenan_Mvl +#define _vel_pvfmkwlenan_MvMl __builtin_ve_vl_pvfmkwlenan_MvMl +#define _vel_vfmkdgt_mvl __builtin_ve_vl_vfmkdgt_mvl +#define _vel_vfmkdgt_mvml __builtin_ve_vl_vfmkdgt_mvml +#define _vel_vfmkdlt_mvl __builtin_ve_vl_vfmkdlt_mvl +#define _vel_vfmkdlt_mvml __builtin_ve_vl_vfmkdlt_mvml +#define _vel_vfmkdne_mvl __builtin_ve_vl_vfmkdne_mvl +#define _vel_vfmkdne_mvml __builtin_ve_vl_vfmkdne_mvml +#define _vel_vfmkdeq_mvl __builtin_ve_vl_vfmkdeq_mvl +#define _vel_vfmkdeq_mvml __builtin_ve_vl_vfmkdeq_mvml +#define _vel_vfmkdge_mvl __builtin_ve_vl_vfmkdge_mvl +#define _vel_vfmkdge_mvml __builtin_ve_vl_vfmkdge_mvml +#define _vel_vfmkdle_mvl __builtin_ve_vl_vfmkdle_mvl +#define _vel_vfmkdle_mvml __builtin_ve_vl_vfmkdle_mvml +#define _vel_vfmkdnum_mvl __builtin_ve_vl_vfmkdnum_mvl +#define _vel_vfmkdnum_mvml __builtin_ve_vl_vfmkdnum_mvml +#define _vel_vfmkdnan_mvl __builtin_ve_vl_vfmkdnan_mvl +#define _vel_vfmkdnan_mvml __builtin_ve_vl_vfmkdnan_mvml +#define _vel_vfmkdgtnan_mvl __builtin_ve_vl_vfmkdgtnan_mvl +#define _vel_vfmkdgtnan_mvml __builtin_ve_vl_vfmkdgtnan_mvml +#define _vel_vfmkdltnan_mvl __builtin_ve_vl_vfmkdltnan_mvl 
+#define _vel_vfmkdltnan_mvml __builtin_ve_vl_vfmkdltnan_mvml +#define _vel_vfmkdnenan_mvl __builtin_ve_vl_vfmkdnenan_mvl +#define _vel_vfmkdnenan_mvml __builtin_ve_vl_vfmkdnenan_mvml +#define _vel_vfmkdeqnan_mvl __builtin_ve_vl_vfmkdeqnan_mvl +#define _vel_vfmkdeqnan_mvml __builtin_ve_vl_vfmkdeqnan_mvml +#define _vel_vfmkdgenan_mvl __builtin_ve_vl_vfmkdgenan_mvl +#define _vel_vfmkdgenan_mvml __builtin_ve_vl_vfmkdgenan_mvml +#define _vel_vfmkdlenan_mvl __builtin_ve_vl_vfmkdlenan_mvl +#define _vel_vfmkdlenan_mvml __builtin_ve_vl_vfmkdlenan_mvml +#define _vel_vfmksgt_mvl __builtin_ve_vl_vfmksgt_mvl +#define _vel_vfmksgt_mvml __builtin_ve_vl_vfmksgt_mvml +#define _vel_vfmkslt_mvl __builtin_ve_vl_vfmkslt_mvl +#define _vel_vfmkslt_mvml __builtin_ve_vl_vfmkslt_mvml +#define _vel_vfmksne_mvl __builtin_ve_vl_vfmksne_mvl +#define _vel_vfmksne_mvml __builtin_ve_vl_vfmksne_mvml +#define _vel_vfmkseq_mvl __builtin_ve_vl_vfmkseq_mvl +#define _vel_vfmkseq_mvml __builtin_ve_vl_vfmkseq_mvml +#define _vel_vfmksge_mvl __builtin_ve_vl_vfmksge_mvl +#define _vel_vfmksge_mvml __builtin_ve_vl_vfmksge_mvml +#define _vel_vfmksle_mvl __builtin_ve_vl_vfmksle_mvl +#define _vel_vfmksle_mvml __builtin_ve_vl_vfmksle_mvml +#define _vel_vfmksnum_mvl __builtin_ve_vl_vfmksnum_mvl +#define _vel_vfmksnum_mvml __builtin_ve_vl_vfmksnum_mvml +#define _vel_vfmksnan_mvl __builtin_ve_vl_vfmksnan_mvl +#define _vel_vfmksnan_mvml __builtin_ve_vl_vfmksnan_mvml +#define _vel_vfmksgtnan_mvl __builtin_ve_vl_vfmksgtnan_mvl +#define _vel_vfmksgtnan_mvml __builtin_ve_vl_vfmksgtnan_mvml +#define _vel_vfmksltnan_mvl __builtin_ve_vl_vfmksltnan_mvl +#define _vel_vfmksltnan_mvml __builtin_ve_vl_vfmksltnan_mvml +#define _vel_vfmksnenan_mvl __builtin_ve_vl_vfmksnenan_mvl +#define _vel_vfmksnenan_mvml __builtin_ve_vl_vfmksnenan_mvml +#define _vel_vfmkseqnan_mvl __builtin_ve_vl_vfmkseqnan_mvl +#define _vel_vfmkseqnan_mvml __builtin_ve_vl_vfmkseqnan_mvml +#define _vel_vfmksgenan_mvl __builtin_ve_vl_vfmksgenan_mvl +#define _vel_vfmksgenan_mvml __builtin_ve_vl_vfmksgenan_mvml +#define _vel_vfmkslenan_mvl __builtin_ve_vl_vfmkslenan_mvl +#define _vel_vfmkslenan_mvml __builtin_ve_vl_vfmkslenan_mvml +#define _vel_pvfmkslogt_mvl __builtin_ve_vl_pvfmkslogt_mvl +#define _vel_pvfmksupgt_mvl __builtin_ve_vl_pvfmksupgt_mvl +#define _vel_pvfmkslogt_mvml __builtin_ve_vl_pvfmkslogt_mvml +#define _vel_pvfmksupgt_mvml __builtin_ve_vl_pvfmksupgt_mvml +#define _vel_pvfmkslolt_mvl __builtin_ve_vl_pvfmkslolt_mvl +#define _vel_pvfmksuplt_mvl __builtin_ve_vl_pvfmksuplt_mvl +#define _vel_pvfmkslolt_mvml __builtin_ve_vl_pvfmkslolt_mvml +#define _vel_pvfmksuplt_mvml __builtin_ve_vl_pvfmksuplt_mvml +#define _vel_pvfmkslone_mvl __builtin_ve_vl_pvfmkslone_mvl +#define _vel_pvfmksupne_mvl __builtin_ve_vl_pvfmksupne_mvl +#define _vel_pvfmkslone_mvml __builtin_ve_vl_pvfmkslone_mvml +#define _vel_pvfmksupne_mvml __builtin_ve_vl_pvfmksupne_mvml +#define _vel_pvfmksloeq_mvl __builtin_ve_vl_pvfmksloeq_mvl +#define _vel_pvfmksupeq_mvl __builtin_ve_vl_pvfmksupeq_mvl +#define _vel_pvfmksloeq_mvml __builtin_ve_vl_pvfmksloeq_mvml +#define _vel_pvfmksupeq_mvml __builtin_ve_vl_pvfmksupeq_mvml +#define _vel_pvfmksloge_mvl __builtin_ve_vl_pvfmksloge_mvl +#define _vel_pvfmksupge_mvl __builtin_ve_vl_pvfmksupge_mvl +#define _vel_pvfmksloge_mvml __builtin_ve_vl_pvfmksloge_mvml +#define _vel_pvfmksupge_mvml __builtin_ve_vl_pvfmksupge_mvml +#define _vel_pvfmkslole_mvl __builtin_ve_vl_pvfmkslole_mvl +#define _vel_pvfmksuple_mvl __builtin_ve_vl_pvfmksuple_mvl +#define _vel_pvfmkslole_mvml 
__builtin_ve_vl_pvfmkslole_mvml +#define _vel_pvfmksuple_mvml __builtin_ve_vl_pvfmksuple_mvml +#define _vel_pvfmkslonum_mvl __builtin_ve_vl_pvfmkslonum_mvl +#define _vel_pvfmksupnum_mvl __builtin_ve_vl_pvfmksupnum_mvl +#define _vel_pvfmkslonum_mvml __builtin_ve_vl_pvfmkslonum_mvml +#define _vel_pvfmksupnum_mvml __builtin_ve_vl_pvfmksupnum_mvml +#define _vel_pvfmkslonan_mvl __builtin_ve_vl_pvfmkslonan_mvl +#define _vel_pvfmksupnan_mvl __builtin_ve_vl_pvfmksupnan_mvl +#define _vel_pvfmkslonan_mvml __builtin_ve_vl_pvfmkslonan_mvml +#define _vel_pvfmksupnan_mvml __builtin_ve_vl_pvfmksupnan_mvml +#define _vel_pvfmkslogtnan_mvl __builtin_ve_vl_pvfmkslogtnan_mvl +#define _vel_pvfmksupgtnan_mvl __builtin_ve_vl_pvfmksupgtnan_mvl +#define _vel_pvfmkslogtnan_mvml __builtin_ve_vl_pvfmkslogtnan_mvml +#define _vel_pvfmksupgtnan_mvml __builtin_ve_vl_pvfmksupgtnan_mvml +#define _vel_pvfmksloltnan_mvl __builtin_ve_vl_pvfmksloltnan_mvl +#define _vel_pvfmksupltnan_mvl __builtin_ve_vl_pvfmksupltnan_mvl +#define _vel_pvfmksloltnan_mvml __builtin_ve_vl_pvfmksloltnan_mvml +#define _vel_pvfmksupltnan_mvml __builtin_ve_vl_pvfmksupltnan_mvml +#define _vel_pvfmkslonenan_mvl __builtin_ve_vl_pvfmkslonenan_mvl +#define _vel_pvfmksupnenan_mvl __builtin_ve_vl_pvfmksupnenan_mvl +#define _vel_pvfmkslonenan_mvml __builtin_ve_vl_pvfmkslonenan_mvml +#define _vel_pvfmksupnenan_mvml __builtin_ve_vl_pvfmksupnenan_mvml +#define _vel_pvfmksloeqnan_mvl __builtin_ve_vl_pvfmksloeqnan_mvl +#define _vel_pvfmksupeqnan_mvl __builtin_ve_vl_pvfmksupeqnan_mvl +#define _vel_pvfmksloeqnan_mvml __builtin_ve_vl_pvfmksloeqnan_mvml +#define _vel_pvfmksupeqnan_mvml __builtin_ve_vl_pvfmksupeqnan_mvml +#define _vel_pvfmkslogenan_mvl __builtin_ve_vl_pvfmkslogenan_mvl +#define _vel_pvfmksupgenan_mvl __builtin_ve_vl_pvfmksupgenan_mvl +#define _vel_pvfmkslogenan_mvml __builtin_ve_vl_pvfmkslogenan_mvml +#define _vel_pvfmksupgenan_mvml __builtin_ve_vl_pvfmksupgenan_mvml +#define _vel_pvfmkslolenan_mvl __builtin_ve_vl_pvfmkslolenan_mvl +#define _vel_pvfmksuplenan_mvl __builtin_ve_vl_pvfmksuplenan_mvl +#define _vel_pvfmkslolenan_mvml __builtin_ve_vl_pvfmkslolenan_mvml +#define _vel_pvfmksuplenan_mvml __builtin_ve_vl_pvfmksuplenan_mvml +#define _vel_pvfmksgt_Mvl __builtin_ve_vl_pvfmksgt_Mvl +#define _vel_pvfmksgt_MvMl __builtin_ve_vl_pvfmksgt_MvMl +#define _vel_pvfmkslt_Mvl __builtin_ve_vl_pvfmkslt_Mvl +#define _vel_pvfmkslt_MvMl __builtin_ve_vl_pvfmkslt_MvMl +#define _vel_pvfmksne_Mvl __builtin_ve_vl_pvfmksne_Mvl +#define _vel_pvfmksne_MvMl __builtin_ve_vl_pvfmksne_MvMl +#define _vel_pvfmkseq_Mvl __builtin_ve_vl_pvfmkseq_Mvl +#define _vel_pvfmkseq_MvMl __builtin_ve_vl_pvfmkseq_MvMl +#define _vel_pvfmksge_Mvl __builtin_ve_vl_pvfmksge_Mvl +#define _vel_pvfmksge_MvMl __builtin_ve_vl_pvfmksge_MvMl +#define _vel_pvfmksle_Mvl __builtin_ve_vl_pvfmksle_Mvl +#define _vel_pvfmksle_MvMl __builtin_ve_vl_pvfmksle_MvMl +#define _vel_pvfmksnum_Mvl __builtin_ve_vl_pvfmksnum_Mvl +#define _vel_pvfmksnum_MvMl __builtin_ve_vl_pvfmksnum_MvMl +#define _vel_pvfmksnan_Mvl __builtin_ve_vl_pvfmksnan_Mvl +#define _vel_pvfmksnan_MvMl __builtin_ve_vl_pvfmksnan_MvMl +#define _vel_pvfmksgtnan_Mvl __builtin_ve_vl_pvfmksgtnan_Mvl +#define _vel_pvfmksgtnan_MvMl __builtin_ve_vl_pvfmksgtnan_MvMl +#define _vel_pvfmksltnan_Mvl __builtin_ve_vl_pvfmksltnan_Mvl +#define _vel_pvfmksltnan_MvMl __builtin_ve_vl_pvfmksltnan_MvMl +#define _vel_pvfmksnenan_Mvl __builtin_ve_vl_pvfmksnenan_Mvl +#define _vel_pvfmksnenan_MvMl __builtin_ve_vl_pvfmksnenan_MvMl +#define _vel_pvfmkseqnan_Mvl 
__builtin_ve_vl_pvfmkseqnan_Mvl +#define _vel_pvfmkseqnan_MvMl __builtin_ve_vl_pvfmkseqnan_MvMl +#define _vel_pvfmksgenan_Mvl __builtin_ve_vl_pvfmksgenan_Mvl +#define _vel_pvfmksgenan_MvMl __builtin_ve_vl_pvfmksgenan_MvMl +#define _vel_pvfmkslenan_Mvl __builtin_ve_vl_pvfmkslenan_Mvl +#define _vel_pvfmkslenan_MvMl __builtin_ve_vl_pvfmkslenan_MvMl +#define _vel_vsumwsx_vvl __builtin_ve_vl_vsumwsx_vvl +#define _vel_vsumwsx_vvml __builtin_ve_vl_vsumwsx_vvml +#define _vel_vsumwzx_vvl __builtin_ve_vl_vsumwzx_vvl +#define _vel_vsumwzx_vvml __builtin_ve_vl_vsumwzx_vvml +#define _vel_vsuml_vvl __builtin_ve_vl_vsuml_vvl +#define _vel_vsuml_vvml __builtin_ve_vl_vsuml_vvml +#define _vel_vfsumd_vvl __builtin_ve_vl_vfsumd_vvl +#define _vel_vfsumd_vvml __builtin_ve_vl_vfsumd_vvml +#define _vel_vfsums_vvl __builtin_ve_vl_vfsums_vvl +#define _vel_vfsums_vvml __builtin_ve_vl_vfsums_vvml +#define _vel_vrmaxswfstsx_vvl __builtin_ve_vl_vrmaxswfstsx_vvl +#define _vel_vrmaxswfstsx_vvvl __builtin_ve_vl_vrmaxswfstsx_vvvl +#define _vel_vrmaxswlstsx_vvl __builtin_ve_vl_vrmaxswlstsx_vvl +#define _vel_vrmaxswlstsx_vvvl __builtin_ve_vl_vrmaxswlstsx_vvvl +#define _vel_vrmaxswfstzx_vvl __builtin_ve_vl_vrmaxswfstzx_vvl +#define _vel_vrmaxswfstzx_vvvl __builtin_ve_vl_vrmaxswfstzx_vvvl +#define _vel_vrmaxswlstzx_vvl __builtin_ve_vl_vrmaxswlstzx_vvl +#define _vel_vrmaxswlstzx_vvvl __builtin_ve_vl_vrmaxswlstzx_vvvl +#define _vel_vrminswfstsx_vvl __builtin_ve_vl_vrminswfstsx_vvl +#define _vel_vrminswfstsx_vvvl __builtin_ve_vl_vrminswfstsx_vvvl +#define _vel_vrminswlstsx_vvl __builtin_ve_vl_vrminswlstsx_vvl +#define _vel_vrminswlstsx_vvvl __builtin_ve_vl_vrminswlstsx_vvvl +#define _vel_vrminswfstzx_vvl __builtin_ve_vl_vrminswfstzx_vvl +#define _vel_vrminswfstzx_vvvl __builtin_ve_vl_vrminswfstzx_vvvl +#define _vel_vrminswlstzx_vvl __builtin_ve_vl_vrminswlstzx_vvl +#define _vel_vrminswlstzx_vvvl __builtin_ve_vl_vrminswlstzx_vvvl +#define _vel_vrmaxslfst_vvl __builtin_ve_vl_vrmaxslfst_vvl +#define _vel_vrmaxslfst_vvvl __builtin_ve_vl_vrmaxslfst_vvvl +#define _vel_vrmaxsllst_vvl __builtin_ve_vl_vrmaxsllst_vvl +#define _vel_vrmaxsllst_vvvl __builtin_ve_vl_vrmaxsllst_vvvl +#define _vel_vrminslfst_vvl __builtin_ve_vl_vrminslfst_vvl +#define _vel_vrminslfst_vvvl __builtin_ve_vl_vrminslfst_vvvl +#define _vel_vrminsllst_vvl __builtin_ve_vl_vrminsllst_vvl +#define _vel_vrminsllst_vvvl __builtin_ve_vl_vrminsllst_vvvl +#define _vel_vfrmaxdfst_vvl __builtin_ve_vl_vfrmaxdfst_vvl +#define _vel_vfrmaxdfst_vvvl __builtin_ve_vl_vfrmaxdfst_vvvl +#define _vel_vfrmaxdlst_vvl __builtin_ve_vl_vfrmaxdlst_vvl +#define _vel_vfrmaxdlst_vvvl __builtin_ve_vl_vfrmaxdlst_vvvl +#define _vel_vfrmaxsfst_vvl __builtin_ve_vl_vfrmaxsfst_vvl +#define _vel_vfrmaxsfst_vvvl __builtin_ve_vl_vfrmaxsfst_vvvl +#define _vel_vfrmaxslst_vvl __builtin_ve_vl_vfrmaxslst_vvl +#define _vel_vfrmaxslst_vvvl __builtin_ve_vl_vfrmaxslst_vvvl +#define _vel_vfrmindfst_vvl __builtin_ve_vl_vfrmindfst_vvl +#define _vel_vfrmindfst_vvvl __builtin_ve_vl_vfrmindfst_vvvl +#define _vel_vfrmindlst_vvl __builtin_ve_vl_vfrmindlst_vvl +#define _vel_vfrmindlst_vvvl __builtin_ve_vl_vfrmindlst_vvvl +#define _vel_vfrminsfst_vvl __builtin_ve_vl_vfrminsfst_vvl +#define _vel_vfrminsfst_vvvl __builtin_ve_vl_vfrminsfst_vvvl +#define _vel_vfrminslst_vvl __builtin_ve_vl_vfrminslst_vvl +#define _vel_vfrminslst_vvvl __builtin_ve_vl_vfrminslst_vvvl +#define _vel_vrand_vvl __builtin_ve_vl_vrand_vvl +#define _vel_vrand_vvml __builtin_ve_vl_vrand_vvml +#define _vel_vror_vvl __builtin_ve_vl_vror_vvl +#define 
_vel_vror_vvml __builtin_ve_vl_vror_vvml +#define _vel_vrxor_vvl __builtin_ve_vl_vrxor_vvl +#define _vel_vrxor_vvml __builtin_ve_vl_vrxor_vvml +#define _vel_vgt_vvssl __builtin_ve_vl_vgt_vvssl +#define _vel_vgt_vvssvl __builtin_ve_vl_vgt_vvssvl +#define _vel_vgt_vvssml __builtin_ve_vl_vgt_vvssml +#define _vel_vgt_vvssmvl __builtin_ve_vl_vgt_vvssmvl +#define _vel_vgtnc_vvssl __builtin_ve_vl_vgtnc_vvssl +#define _vel_vgtnc_vvssvl __builtin_ve_vl_vgtnc_vvssvl +#define _vel_vgtnc_vvssml __builtin_ve_vl_vgtnc_vvssml +#define _vel_vgtnc_vvssmvl __builtin_ve_vl_vgtnc_vvssmvl +#define _vel_vgtu_vvssl __builtin_ve_vl_vgtu_vvssl +#define _vel_vgtu_vvssvl __builtin_ve_vl_vgtu_vvssvl +#define _vel_vgtu_vvssml __builtin_ve_vl_vgtu_vvssml +#define _vel_vgtu_vvssmvl __builtin_ve_vl_vgtu_vvssmvl +#define _vel_vgtunc_vvssl __builtin_ve_vl_vgtunc_vvssl +#define _vel_vgtunc_vvssvl __builtin_ve_vl_vgtunc_vvssvl +#define _vel_vgtunc_vvssml __builtin_ve_vl_vgtunc_vvssml +#define _vel_vgtunc_vvssmvl __builtin_ve_vl_vgtunc_vvssmvl +#define _vel_vgtlsx_vvssl __builtin_ve_vl_vgtlsx_vvssl +#define _vel_vgtlsx_vvssvl __builtin_ve_vl_vgtlsx_vvssvl +#define _vel_vgtlsx_vvssml __builtin_ve_vl_vgtlsx_vvssml +#define _vel_vgtlsx_vvssmvl __builtin_ve_vl_vgtlsx_vvssmvl +#define _vel_vgtlsxnc_vvssl __builtin_ve_vl_vgtlsxnc_vvssl +#define _vel_vgtlsxnc_vvssvl __builtin_ve_vl_vgtlsxnc_vvssvl +#define _vel_vgtlsxnc_vvssml __builtin_ve_vl_vgtlsxnc_vvssml +#define _vel_vgtlsxnc_vvssmvl __builtin_ve_vl_vgtlsxnc_vvssmvl +#define _vel_vgtlzx_vvssl __builtin_ve_vl_vgtlzx_vvssl +#define _vel_vgtlzx_vvssvl __builtin_ve_vl_vgtlzx_vvssvl +#define _vel_vgtlzx_vvssml __builtin_ve_vl_vgtlzx_vvssml +#define _vel_vgtlzx_vvssmvl __builtin_ve_vl_vgtlzx_vvssmvl +#define _vel_vgtlzxnc_vvssl __builtin_ve_vl_vgtlzxnc_vvssl +#define _vel_vgtlzxnc_vvssvl __builtin_ve_vl_vgtlzxnc_vvssvl +#define _vel_vgtlzxnc_vvssml __builtin_ve_vl_vgtlzxnc_vvssml +#define _vel_vgtlzxnc_vvssmvl __builtin_ve_vl_vgtlzxnc_vvssmvl +#define _vel_vsc_vvssl __builtin_ve_vl_vsc_vvssl +#define _vel_vsc_vvssml __builtin_ve_vl_vsc_vvssml +#define _vel_vscnc_vvssl __builtin_ve_vl_vscnc_vvssl +#define _vel_vscnc_vvssml __builtin_ve_vl_vscnc_vvssml +#define _vel_vscot_vvssl __builtin_ve_vl_vscot_vvssl +#define _vel_vscot_vvssml __builtin_ve_vl_vscot_vvssml +#define _vel_vscncot_vvssl __builtin_ve_vl_vscncot_vvssl +#define _vel_vscncot_vvssml __builtin_ve_vl_vscncot_vvssml +#define _vel_vscu_vvssl __builtin_ve_vl_vscu_vvssl +#define _vel_vscu_vvssml __builtin_ve_vl_vscu_vvssml +#define _vel_vscunc_vvssl __builtin_ve_vl_vscunc_vvssl +#define _vel_vscunc_vvssml __builtin_ve_vl_vscunc_vvssml +#define _vel_vscuot_vvssl __builtin_ve_vl_vscuot_vvssl +#define _vel_vscuot_vvssml __builtin_ve_vl_vscuot_vvssml +#define _vel_vscuncot_vvssl __builtin_ve_vl_vscuncot_vvssl +#define _vel_vscuncot_vvssml __builtin_ve_vl_vscuncot_vvssml +#define _vel_vscl_vvssl __builtin_ve_vl_vscl_vvssl +#define _vel_vscl_vvssml __builtin_ve_vl_vscl_vvssml +#define _vel_vsclnc_vvssl __builtin_ve_vl_vsclnc_vvssl +#define _vel_vsclnc_vvssml __builtin_ve_vl_vsclnc_vvssml +#define _vel_vsclot_vvssl __builtin_ve_vl_vsclot_vvssl +#define _vel_vsclot_vvssml __builtin_ve_vl_vsclot_vvssml +#define _vel_vsclncot_vvssl __builtin_ve_vl_vsclncot_vvssl +#define _vel_vsclncot_vvssml __builtin_ve_vl_vsclncot_vvssml +#define _vel_andm_mmm __builtin_ve_vl_andm_mmm +#define _vel_andm_MMM __builtin_ve_vl_andm_MMM +#define _vel_orm_mmm __builtin_ve_vl_orm_mmm +#define _vel_orm_MMM __builtin_ve_vl_orm_MMM +#define _vel_xorm_mmm 
__builtin_ve_vl_xorm_mmm +#define _vel_xorm_MMM __builtin_ve_vl_xorm_MMM +#define _vel_eqvm_mmm __builtin_ve_vl_eqvm_mmm +#define _vel_eqvm_MMM __builtin_ve_vl_eqvm_MMM +#define _vel_nndm_mmm __builtin_ve_vl_nndm_mmm +#define _vel_nndm_MMM __builtin_ve_vl_nndm_MMM +#define _vel_negm_mm __builtin_ve_vl_negm_mm +#define _vel_negm_MM __builtin_ve_vl_negm_MM +#define _vel_pcvm_sml __builtin_ve_vl_pcvm_sml +#define _vel_lzvm_sml __builtin_ve_vl_lzvm_sml +#define _vel_tovm_sml __builtin_ve_vl_tovm_sml diff --git a/clang/test/Driver/ve-features.c b/clang/test/Driver/ve-features.c new file mode 100644 --- /dev/null +++ b/clang/test/Driver/ve-features.c @@ -0,0 +1,7 @@ +// RUN: %clang -target ve-unknown-linux -### %s -mvevec 2>&1 | FileCheck %s -check-prefix=VEVEC +// RUN: %clang -target ve-unknown-linux -### %s -mno-vevec 2>&1 | FileCheck %s -check-prefix=NO-VEVEC +// RUN: %clang -target ve-unknown-linux -### %s 2>&1 | FileCheck %s -check-prefix=DEFAULT + +// VEVEC: "-target-feature" "+vec" +// NO-VEVEC: "-target-feature" "-vec" +// DEFAULT: "-target-feature" "-vec" diff --git a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake --- a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake +++ b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake @@ -166,6 +166,7 @@ check_symbol_exists(__sparcv9 "" __SPARCV9) check_symbol_exists(__wasm32__ "" __WEBASSEMBLY32) check_symbol_exists(__wasm64__ "" __WEBASSEMBLY64) + check_symbol_exists(__ve__ "" __VE) if(__ARM) add_default_target_arch(arm) elseif(__AARCH64) @@ -200,6 +201,8 @@ add_default_target_arch(wasm32) elseif(__WEBASSEMBLY64) add_default_target_arch(wasm64) + elseif(__VE) + add_default_target_arch(ve) endif() endmacro() diff --git a/compiler-rt/cmake/base-config-ix.cmake b/compiler-rt/cmake/base-config-ix.cmake --- a/compiler-rt/cmake/base-config-ix.cmake +++ b/compiler-rt/cmake/base-config-ix.cmake @@ -236,6 +236,8 @@ test_target_arch(wasm32 "" "--target=wasm32-unknown-unknown") elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "wasm64") test_target_arch(wasm64 "" "--target=wasm64-unknown-unknown") + elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "ve") + test_target_arch(ve "__ve__" "--target=ve-unknown-none") endif() set(COMPILER_RT_OS_SUFFIX "") endif() diff --git a/compiler-rt/cmake/builtin-config-ix.cmake b/compiler-rt/cmake/builtin-config-ix.cmake --- a/compiler-rt/cmake/builtin-config-ix.cmake +++ b/compiler-rt/cmake/builtin-config-ix.cmake @@ -37,6 +37,7 @@ set(SPARCV9 sparcv9) set(WASM32 wasm32) set(WASM64 wasm64) +set(VE ve) if(APPLE) set(ARM64 arm64) @@ -45,7 +46,7 @@ endif() set(ALL_BUILTIN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} - ${HEXAGON} ${MIPS32} ${MIPS64} ${PPC64} ${RISCV32} ${RISCV64} ${SPARC} ${SPARCV9} ${WASM32} ${WASM64}) + ${HEXAGON} ${MIPS32} ${MIPS64} ${PPC64} ${RISCV32} ${RISCV64} ${SPARC} ${SPARCV9} ${WASM32} ${WASM64} ${VE}) include(CompilerRTUtils) include(CompilerRTDarwinUtils) diff --git a/compiler-rt/cmake/config-ix.cmake b/compiler-rt/cmake/config-ix.cmake --- a/compiler-rt/cmake/config-ix.cmake +++ b/compiler-rt/cmake/config-ix.cmake @@ -250,6 +250,7 @@ set(SPARCV9 sparcv9) set(WASM32 wasm32) set(WASM64 wasm64) +set(VE ve) if(APPLE) set(ARM64 arm64) @@ -261,7 +262,7 @@ ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9}) set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9}) -set(ALL_CRT_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV32} ${RISCV64}) 
+set(ALL_CRT_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV32} ${RISCV64} ${VE}) set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64}) if(ANDROID) diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt --- a/compiler-rt/lib/builtins/CMakeLists.txt +++ b/compiler-rt/lib/builtins/CMakeLists.txt @@ -561,6 +561,11 @@ ${GENERIC_SOURCES} ) +set(ve_SOURCES + ve/llvm_grow_stack.S + ${GENERIC_TF_SOURCES} + ${GENERIC_SOURCES}) + add_custom_target(builtins) set_target_properties(builtins PROPERTIES FOLDER "Compiler-RT Misc") diff --git a/compiler-rt/lib/builtins/ve/llvm_grow_stack.S b/compiler-rt/lib/builtins/ve/llvm_grow_stack.S new file mode 100644 --- /dev/null +++ b/compiler-rt/lib/builtins/ve/llvm_grow_stack.S @@ -0,0 +1,32 @@ +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#include "../assembly.h" + +// grow_stack routine +// This routine is VE specific +// https://www.nec.com/en/global/prod/hpc/aurora/document/VE-ABI_v1.1.pdf + +// destroy %s62 and %s63 only + +#ifdef __ve__ + +.text +.p2align 4 +DEFINE_COMPILERRT_FUNCTION(__llvm_grow_stack) + lea %s62, 15(%s0) # (s0 + 15) / 16 * 16 + and %s62, -16, %s62 + subu.l %sp, %sp, %s62 # sp -= alloca size + brge.l.t %sp, %sl, 1f + ld %s63, 0x18(,%tp) # load param area + lea %s62, 0x13b # syscall # of grow + shm.l %s62, 0x0(%s63) # stored at addr:0 + shm.l %sl, 0x8(%s63) # old limit at addr:8 + shm.l %sp, 0x10(%s63) # new limit at addr:16 + monc +1: + b.l (,%lr) +END_COMPILERRT_FUNCTION(__llvm_grow_stack) + +#endif // __ve__ diff --git a/libcxx/src/filesystem/operations.cpp b/libcxx/src/filesystem/operations.cpp --- a/libcxx/src/filesystem/operations.cpp +++ b/libcxx/src/filesystem/operations.cpp @@ -26,11 +26,16 @@ #include /* values for fchmodat */ #if defined(__linux__) +#if defined(__ve__) +#include +#define _LIBCPP_USE_SENDFILE +#else #include #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33) #include #define _LIBCPP_USE_SENDFILE #endif +#endif #elif defined(__APPLE__) || __has_include() #include #define _LIBCPP_USE_COPYFILE diff --git a/libcxxabi/cmake/config-ix.cmake b/libcxxabi/cmake/config-ix.cmake --- a/libcxxabi/cmake/config-ix.cmake +++ b/libcxxabi/cmake/config-ix.cmake @@ -27,6 +27,9 @@ list(APPEND CMAKE_REQUIRED_FLAGS -rtlib=compiler-rt) find_compiler_rt_library(builtins LIBCXXABI_BUILTINS_LIBRARY) list(APPEND CMAKE_REQUIRED_LIBRARIES "${LIBCXXABI_BUILTINS_LIBRARY}") + # CMAKE_REQUIRED_LIBRARIES is not used to link libc++abi.so, so + # append builtins to LIBCXXABI_SHARED_LIBRARIES too + list(APPEND LIBCXXABI_SHARED_LIBRARIES "${LIBCXXABI_BUILTINS_LIBRARY}") else () if (LIBCXXABI_HAS_GCC_S_LIB) list(APPEND CMAKE_REQUIRED_LIBRARIES gcc_s) diff --git a/libunwind/include/__libunwind_config.h b/libunwind/include/__libunwind_config.h --- a/libunwind/include/__libunwind_config.h +++ b/libunwind/include/__libunwind_config.h @@ -23,6 +23,7 @@ #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_OR1K 32 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_MIPS 65 #define _LIBUNWIND_HIGHEST_DWARF_REGISTER_SPARC 31 +#define _LIBUNWIND_HIGHEST_DWARF_REGISTER_VE 143 #if defined(_LIBUNWIND_IS_NATIVE_ONLY) # if defined(__i386__) @@ -118,6 +119,11 @@ #define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_SPARC #define _LIBUNWIND_CONTEXT_SIZE 16 #define _LIBUNWIND_CURSOR_SIZE 23 +# elif defined(__ve__) +# define _LIBUNWIND_TARGET_VE +# define 
_LIBUNWIND_CONTEXT_SIZE 74 +# define _LIBUNWIND_CURSOR_SIZE 15 +# define _LIBUNWIND_HIGHEST_DWARF_REGISTER _LIBUNWIND_HIGHEST_DWARF_REGISTER_VE # else # error "Unsupported architecture." # endif diff --git a/libunwind/include/libunwind.h b/libunwind/include/libunwind.h --- a/libunwind/include/libunwind.h +++ b/libunwind/include/libunwind.h @@ -832,4 +832,75 @@ UNW_SPARC_I7 = 31, }; +// VE register numbers +enum { + UNW_VE_S0 = 0, + UNW_VE_S1 = 1, + UNW_VE_S2 = 2, + UNW_VE_S3 = 3, + UNW_VE_S4 = 4, + UNW_VE_S5 = 5, + UNW_VE_S6 = 6, + UNW_VE_S7 = 7, + UNW_VE_S8 = 8, + UNW_VE_S9 = 9, + UNW_VE_S10 = 10, + UNW_VE_S11 = 11, + UNW_VE_S12 = 12, + UNW_VE_S13 = 13, + UNW_VE_S14 = 14, + UNW_VE_S15 = 15, + UNW_VE_S16 = 16, + UNW_VE_S17 = 17, + UNW_VE_S18 = 18, + UNW_VE_S19 = 19, + UNW_VE_S20 = 20, + UNW_VE_S21 = 21, + UNW_VE_S22 = 22, + UNW_VE_S23 = 23, + UNW_VE_S24 = 24, + UNW_VE_S25 = 25, + UNW_VE_S26 = 26, + UNW_VE_S27 = 27, + UNW_VE_S28 = 28, + UNW_VE_S29 = 29, + UNW_VE_S30 = 30, + UNW_VE_S31 = 31, + UNW_VE_S32 = 32, + UNW_VE_S33 = 33, + UNW_VE_S34 = 34, + UNW_VE_S35 = 35, + UNW_VE_S36 = 36, + UNW_VE_S37 = 37, + UNW_VE_S38 = 38, + UNW_VE_S39 = 39, + UNW_VE_S40 = 40, + UNW_VE_S41 = 41, + UNW_VE_S42 = 42, + UNW_VE_S43 = 43, + UNW_VE_S44 = 44, + UNW_VE_S45 = 45, + UNW_VE_S46 = 46, + UNW_VE_S47 = 47, + UNW_VE_S48 = 48, + UNW_VE_S49 = 49, + UNW_VE_S50 = 50, + UNW_VE_S51 = 51, + UNW_VE_S52 = 52, + UNW_VE_S53 = 53, + UNW_VE_S54 = 54, + UNW_VE_S55 = 55, + UNW_VE_S56 = 56, + UNW_VE_S57 = 57, + UNW_VE_S58 = 58, + UNW_VE_S59 = 59, + UNW_VE_S60 = 60, + UNW_VE_S61 = 61, + UNW_VE_S62 = 62, + UNW_VE_S63 = 63, + + UNW_VE_VIXR = 144, + UNW_VE_VL = 145, +}; + #endif diff --git a/libunwind/src/Registers.hpp b/libunwind/src/Registers.hpp --- a/libunwind/src/Registers.hpp +++ b/libunwind/src/Registers.hpp @@ -3517,6 +3517,290 @@ } #endif // _LIBUNWIND_TARGET_SPARC +#if defined(_LIBUNWIND_TARGET_VE) +/// Registers_ve holds the register state of a thread in a VE process. +class _LIBUNWIND_HIDDEN Registers_ve { +public: + Registers_ve(); + Registers_ve(const void *registers); + + bool validRegister(int num) const; + uint64_t getRegister(int num) const; + void setRegister(int num, uint64_t value); + bool validFloatRegister(int num) const; + double getFloatRegister(int num) const; + void setFloatRegister(int num, double value); + bool validVectorRegister(int num) const; + v128 getVectorRegister(int num) const; + void setVectorRegister(int num, v128 value); + const char *getRegisterName(int num); + void jumpto(); + static int lastDwarfRegNum() { return _LIBUNWIND_HIGHEST_DWARF_REGISTER_VE; } + + uint64_t getSP() const { return _registers.__s[11]; } + void setSP(uint64_t value) { _registers.__s[11] = value; } + uint64_t getIP() const { return _registers.__ic; } + void setIP(uint64_t value) { _registers.__ic = value; } + +private: + struct ve_thread_state_t { + uint64_t __s[64]; // s0-s64 + uint64_t __ic; // Instruction counter (IC) + uint64_t __vixr; // Vector Index Register + uint64_t __vl; // Vector Length Register + }; + + ve_thread_state_t _registers; // total 67 registers + + // Currently no vector registers is preserved since libunwind + // supports only 128-bit vector registers. 
+
+  static int getScalarRegNum(int num);
+};
+
+inline Registers_ve::Registers_ve(const void *registers) {
+  static_assert((check_fit<Registers_ve, unw_context_t>::does_fit),
+                "ve registers do not fit into unw_context_t");
+  memcpy(&_registers, static_cast<const uint8_t *>(registers),
+         sizeof(_registers));
+  static_assert(sizeof(_registers) == 536,
+                "expected vector register offset to be 536");
+}
+
+inline Registers_ve::Registers_ve() {
+  memset(&_registers, 0, sizeof(_registers));
+}
+
+inline bool Registers_ve::validRegister(int regNum) const {
+  if (regNum >= UNW_VE_S0 && regNum <= UNW_VE_S63)
+    return true;
+
+  switch (regNum) {
+  case UNW_REG_IP:
+  case UNW_REG_SP:
+  case UNW_VE_VIXR:
+  case UNW_VE_VL:
+    return true;
+  default:
+    return false;
+  }
+}
+
+inline int Registers_ve::getScalarRegNum(int num)
+{
+  if (num >= UNW_VE_S0 && num <= UNW_VE_S63)
+    return num - UNW_VE_S0;
+  return 0;
+}
+
+inline uint64_t Registers_ve::getRegister(int regNum) const {
+  if (regNum >= UNW_VE_S0 && regNum <= UNW_VE_S63)
+    return _registers.__s[getScalarRegNum(regNum)];
+
+  switch (regNum) {
+  case UNW_REG_IP:
+    return _registers.__ic;
+  case UNW_REG_SP:
+    return _registers.__s[11];
+  case UNW_VE_VIXR:
+    return _registers.__vixr;
+  case UNW_VE_VL:
+    return _registers.__vl;
+  }
+  _LIBUNWIND_ABORT("unsupported ve register");
+}
+
+inline void Registers_ve::setRegister(int regNum, uint64_t value) {
+  if (regNum >= UNW_VE_S0 && regNum <= UNW_VE_S63) {
+    _registers.__s[getScalarRegNum(regNum)] = value;
+    return;
+  }
+
+  switch (regNum) {
+  case UNW_REG_IP:
+    _registers.__ic = value;
+    return;
+  case UNW_REG_SP:
+    _registers.__s[11] = value;
+    return;
+  case UNW_VE_VIXR:
+    _registers.__vixr = value;
+    return;
+  case UNW_VE_VL:
+    _registers.__vl = value;
+    return;
+  }
+  _LIBUNWIND_ABORT("unsupported ve register");
+}
+
+inline bool Registers_ve::validFloatRegister(int /* regNum */) const {
+  return false;
+}
+
+inline double Registers_ve::getFloatRegister(int /* regNum */) const {
+  _LIBUNWIND_ABORT("VE float support not implemented");
+}
+
+inline void Registers_ve::setFloatRegister(int /* regNum */,
+                                           double /* value */) {
+  _LIBUNWIND_ABORT("VE float support not implemented");
+}
+
+inline bool Registers_ve::validVectorRegister(int /* regNum */) const {
+  return false;
+}
+
+inline v128 Registers_ve::getVectorRegister(int /* regNum */) const {
+  _LIBUNWIND_ABORT("VE vector support not implemented");
+}
+
+inline void Registers_ve::setVectorRegister(int /* regNum */, v128 /* value */) {
+  _LIBUNWIND_ABORT("VE vector support not implemented");
+}
+
+inline const char *Registers_ve::getRegisterName(int regNum) {
+  switch (regNum) {
+  case UNW_REG_IP:
+    return "ip";
+  case UNW_REG_SP:
+    return "sp";
+  case UNW_VE_VIXR:
+    return "vixr";
+  case UNW_VE_VL:
+    return "vl";
+  case UNW_VE_S0:
+    return "s0";
+  case UNW_VE_S1:
+    return "s1";
+  case UNW_VE_S2:
+    return "s2";
+  case UNW_VE_S3:
+    return "s3";
+  case UNW_VE_S4:
+    return "s4";
+  case UNW_VE_S5:
+    return "s5";
+  case UNW_VE_S6:
+    return "s6";
+  case UNW_VE_S7:
+    return "s7";
+  case UNW_VE_S8:
+    return "s8";
+  case UNW_VE_S9:
+    return "s9";
+  case UNW_VE_S10:
+    return "s10";
+  case UNW_VE_S11:
+    return "s11";
+  case UNW_VE_S12:
+    return "s12";
+  case UNW_VE_S13:
+    return "s13";
+  case UNW_VE_S14:
+    return "s14";
+  case UNW_VE_S15:
+    return "s15";
+  case UNW_VE_S16:
+    return "s16";
+  case UNW_VE_S17:
+    return "s17";
+  case UNW_VE_S18:
+    return "s18";
+  case UNW_VE_S19:
+    return "s19";
+  case UNW_VE_S20:
+    return "s20";
+  case UNW_VE_S21:
+    return "s21";
+  case UNW_VE_S22:
+    return "s22";
+  case UNW_VE_S23: +
return "s23"; + case UNW_VE_S24: + return "s24"; + case UNW_VE_S25: + return "s25"; + case UNW_VE_S26: + return "s26"; + case UNW_VE_S27: + return "s27"; + case UNW_VE_S28: + return "s28"; + case UNW_VE_S29: + return "s29"; + case UNW_VE_S30: + return "s30"; + case UNW_VE_S31: + return "s31"; + case UNW_VE_S32: + return "s32"; + case UNW_VE_S33: + return "s33"; + case UNW_VE_S34: + return "s34"; + case UNW_VE_S35: + return "s35"; + case UNW_VE_S36: + return "s36"; + case UNW_VE_S37: + return "s37"; + case UNW_VE_S38: + return "s38"; + case UNW_VE_S39: + return "s39"; + case UNW_VE_S40: + return "s40"; + case UNW_VE_S41: + return "s41"; + case UNW_VE_S42: + return "s42"; + case UNW_VE_S43: + return "s43"; + case UNW_VE_S44: + return "s44"; + case UNW_VE_S45: + return "s45"; + case UNW_VE_S46: + return "s46"; + case UNW_VE_S47: + return "s47"; + case UNW_VE_S48: + return "s48"; + case UNW_VE_S49: + return "s49"; + case UNW_VE_S50: + return "s50"; + case UNW_VE_S51: + return "s51"; + case UNW_VE_S52: + return "s52"; + case UNW_VE_S53: + return "s53"; + case UNW_VE_S54: + return "s54"; + case UNW_VE_S55: + return "s55"; + case UNW_VE_S56: + return "s56"; + case UNW_VE_S57: + return "s57"; + case UNW_VE_S58: + return "s58"; + case UNW_VE_S59: + return "s59"; + case UNW_VE_S60: + return "s60"; + case UNW_VE_S61: + return "s61"; + case UNW_VE_S62: + return "s62"; + case UNW_VE_S63: + return "s63"; + } + return "unknown register"; +} +#endif // _LIBUNWIND_TARGET_VE + } // namespace libunwind #endif // __REGISTERS_HPP__ diff --git a/libunwind/src/Unwind-sjlj.c b/libunwind/src/Unwind-sjlj.c --- a/libunwind/src/Unwind-sjlj.c +++ b/libunwind/src/Unwind-sjlj.c @@ -32,11 +32,21 @@ // next function in stack of handlers struct _Unwind_FunctionContext *prev; +#if defined(__ve__) + // VE requires to store 64 bit pointers as a part of these data. + + // set by calling function before registering to be the landing pad + uintptr_t resumeLocation; + + // set by personality handler to be parameters passed to landing pad function + uintptr_t resumeParameters[4]; +#else // set by calling function before registering to be the landing pad uint32_t resumeLocation; // set by personality handler to be parameters passed to landing pad function uint32_t resumeParameters[4]; +#endif // set by calling function before registering __personality_routine personality; // arm offset=24 diff --git a/libunwind/src/libunwind.cpp b/libunwind/src/libunwind.cpp --- a/libunwind/src/libunwind.cpp +++ b/libunwind/src/libunwind.cpp @@ -58,6 +58,9 @@ # warning The MIPS architecture is not supported with this ABI and environment! #elif defined(__sparc__) # define REGISTER_KIND Registers_sparc +#elif defined(__ve__) +# warning The VE architecture is not supported with this ABI and environment! +# define REGISTER_KIND Registers_ve #else # error Architecture not supported #endif diff --git a/llvm/CODE_OWNERS.TXT b/llvm/CODE_OWNERS.TXT --- a/llvm/CODE_OWNERS.TXT +++ b/llvm/CODE_OWNERS.TXT @@ -150,6 +150,10 @@ E: me@dylanmckay.io D: AVR Backend +N: Simon Moll +E: simon.moll@emea.nec.com +D: VE Backend + N: Tim Northover E: t.p.northover@gmail.com D: AArch64 backend, misc ARM backend diff --git a/llvm/docs/VE/VectorLength.rst b/llvm/docs/VE/VectorLength.rst new file mode 100644 --- /dev/null +++ b/llvm/docs/VE/VectorLength.rst @@ -0,0 +1,192 @@ +======================================= +How to use Vector Length register on VE +======================================= + +.. 
contents:: Table of Contents
+   :depth: 4
+   :local:
+
+Introduction
+============
+
+This document describes the Vector Length register on VE and its usage: how
+to generate MIR, how to generate intrinsic IR, and so on. The assembler
+instructions are documented at
+https://www.hpc.nec/documents/sdk/pdfs/VectorEngine-as-manual-v1.2.pdf.
+
+What is Vector Length register
+==============================
+
+The Vector Length register (VL) is implicitly read by almost all vector
+instructions and specifies the length of the vector calculation performed by
+the instruction. For example, the `vadds.w.sx` instruction computes a
+v256i32 result when VL is 256, and a v128i32 result when VL is 128.
+
+VL can hold a value from 0 to 256 on the first generation of the Vector
+Engine.
+
+What is the problem
+===================
+
+The VL register is specified implicitly. That means LLVM cannot track its
+liveness if we model VL only through implicit-def/use operands.
+
+For example, in the following program we need to spill/restore the vector
+registers and the VL register, since the function calls destroy them.
+However, it is difficult to restore the correct VL at the vfaddd intrinsic
+without the information needed to track the VL register.
+
+```
+  _ve_lvl(32);                       // set VL to 32
+  __vr vy = _ve_vld_vss(8, pvy);
+  __vr vz = _ve_vld_vss(8, pvz);
+  dump(vy);
+  dump(vz);
+  __vr vx = _ve_vfaddd_vvv(vy, vz);
+```
+
+How to solve the problem
+========================
+
+We decided to specify the VL register explicitly on each vector instruction
+and to let the register allocator allocate the VL register, even though only
+one physical VL register is available on VE. This gives LLVM enough
+information to track the VL register.
+
+For example, the code above is converted to the following MIR. With this
+representation we can spill/restore the virtual register %21 correctly.
+
+```
+  %20:i64 = LEAzzi 32
+  %21:vls = COPY %20:i64
+  %23:v64 = VLDir 8, killed %22:i64, %21:vls
+  %25:v64 = VLDir 8, killed %24:i64, %21:vls
+  CALLr $sx12, ...
+  CALLr $sx12, ...
+  %32:v64 = VFADdv killed %23:v64, killed %25:v64, %21:vls
+```
+
+In order to do so, we need a new path that adds the VL operand explicitly
+and forms SSA correctly.
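+
+At the source level, this explicit-VL design is what the `_vel_` intrinsics
+in `velintrin.h` expose: each vector intrinsic takes the vector length as
+its last argument instead of relying on a preceding `_ve_lvl()` call. The
+sketch below is only an illustration of that calling pattern; it assumes
+that `_vel_vld_vssl`, `_vel_vfaddd_vvvl`, and `_vel_vst_vssl` wrappers exist
+in the generated header and take the vector length as their final parameter.
+
+```
+#include <velintrin.h>
+
+void vadd32(double *px, const double *py, const double *pz) {
+  int vl = 32;                             // vector length for every call
+  __vr vy = _vel_vld_vssl(8, py, vl);      // load 32 doubles, stride 8 bytes
+  __vr vz = _vel_vld_vssl(8, pz, vl);
+  __vr vx = _vel_vfaddd_vvvl(vy, vz, vl);  // VL is an explicit operand
+  _vel_vst_vssl(vx, 8, px, vl);            // store 32 doubles
+}
+```
+
+Because the length travels with each call, the backend can treat VL as an
+ordinary virtual register value and spill/restore it across calls such as
+the `dump()` calls shown earlier.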
+How to create new MIR with VL
+=============================
+
+If you need a new VL, you can create one as shown below. We generally add
+the VL as the last operand.
+
+```
+  unsigned Tmp1 = MF.getRegInfo().createVirtualRegister(&VE::I32RegClass);
+  BuildMI(MBB, MBBI, dl, TII.get(VE::LEAzzi), Tmp1)
+    .addImm(128);
+  unsigned VLReg = MF.getRegInfo().createVirtualRegister(&VE::VLSRegClass);
+  BuildMI(MBB, MBBI, dl, TII.get(VE::COPY), VLReg)
+    .addReg(Tmp1, getKillRegState(true));
+  BuildMI(MBB, MBBI, dl, TII.get(VE::VFMADsv), DestReg)
+    .addReg(V5).addReg(V5).addReg(V4)
+    .addReg(VLReg, getKillRegState(true));
+```
+
+Or, in the DAG, like below (this example uses an existing VL).
+
+```
+  unsigned VLReg = Subtarget->getInstrInfo()->getVectorLengthReg(&MF);
+  SDValue VL = DAG.getCopyFromReg(DAG.getEntryNode(), dl, VLReg, MVT::i32);
+  SDValue V5 = SDValue(DAG.getMachineNode(VE::VRCPsv, dl, VT, V1, VL), 0);
+```
+
+The latter example uses a single virtual VL register at conversion time, but
+the code is later rewritten in finalizeLowering() to refer to the correct
+virtual VL register.
+
+How to create new MIR with VL in tblgen
+=======================================
+
+If you need a new VL, you can create one as shown below.
+
+```
+def : Pat<(v512i32 (load ADDRri:$addr)),
+          (v512i32 (VLDir 8, (LEAasx ADDRri:$addr),
+                    (COPY_TO_REGCLASS (LEAzzi 256), VLS)))>;
+```
+
+Or you can refer to an existing VL as shown below. Here `(GetVL (i32 0))`
+returns the default virtual VL register defined in the MachineFunction.
+
+```
+  def : Pat<(int_ve_vscot_vv v256f64:$vx, v256f64:$vy),
+            (VSCotv v256f64:$vx, v256f64:$vy, (GetVL (i32 0)))>;
+```
+
+This example also uses the single virtual VL register at conversion time;
+the references are rewritten to the correct virtual VL register in
+finalizeLowering().
+
+Details of forming SSA form for VL register
+===========================================
+
+At conversion time, we define a single default virtual VL register per
+MachineFunction. All vector instructions that need to refer to an existing
+VL register use this default virtual VL register. However, the `_ve_lvl`
+intrinsic, which defines a new VL, is converted to an instruction that
+defines the physical VL register, since re-defining an existing virtual VL
+register would break SSA.
+
+For example, consider the following input.
+
+```
+.bb.0:
+  ...
+
+.bb.1:
+  _ve_lvl(32);
+  bra .bb.3
+
+.bb.2:
+  _ve_lvl(l);
+
+.bb.3:
+  __vr vx = _ve_vfaddd_vvv(vy, vz);
+```
+
+This is converted as follows in the middle of MIR lowering.
+
+```
+.bb.0:
+  %0:vls = COPY $vl   ; copy incoming $vl to the default virtual VL register
+  ...
+
+.bb.1:
+  $vl = COPY 32       ; copy the new value to $vl temporarily to not break SSA
+  bra .bb.3
+
+.bb.2:
+  $vl = COPY %l:i32   ; copy the new value to $vl temporarily to not break SSA
+
+.bb.3:
+  %32:v64 = VFADdv killed %23:v64, killed %25:v64, %0:vls
+```
+
+And this is converted as follows in `finalizeLowering()`.
+
+```
+.bb.0:
+  %0:vls = COPY $vl
+  ...
+
+.bb.1:
+  %1:vls = COPY 32      ; create a new virtual VL in the finalize phase.
+  bra .bb.3
+
+.bb.2:
+  %2:vls = COPY %l:i32  ; create a new virtual VL in the finalize phase.
+
+.bb.3:
+  %3:vls = PHI %1:vls, %bb.1, %2:vls, %bb.2 ; create a new PHI to form SSA
+                                            ; correctly in the finalize phase.
+  %32:v64 = VFADdv killed %23:v64, killed %25:v64, %3:vls
+```
+
+We would like to use mem2reg here, but it is difficult to use it in the
+middle of MIR lowering, so we implemented our own SSA construction in
+`finalizeLowering()`.
+
diff --git a/llvm/include/llvm/ADT/Triple.h b/llvm/include/llvm/ADT/Triple.h
--- a/llvm/include/llvm/ADT/Triple.h
+++ b/llvm/include/llvm/ADT/Triple.h
@@ -95,7 +95,8 @@
   wasm64,          // WebAssembly with 64-bit pointers
   renderscript32,  // 32-bit RenderScript
   renderscript64,  // 64-bit RenderScript
-  LastArchType = renderscript64
+  ve,              // NEC SX-Aurora Vector Engine
+  LastArchType = ve
 };
 enum SubArchType {
   NoSubArch,
@@ -730,6 +731,11 @@
     return getArch() == Triple::riscv32 || getArch() == Triple::riscv64;
   }
 
+  /// Tests whether the target is VE
+  bool isVE() const {
+    return getArch() == Triple::ve;
+  }
+
   /// Tests whether the target supports comdat
   bool supportsCOMDAT() const {
     return !isOSBinFormatMachO();
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -311,6 +311,7 @@
   EM_RISCV = 243,  // RISC-V
   EM_LANAI = 244,  // Lanai 32-bit processor
   EM_BPF = 247,    // Linux kernel bpf virtual machine
+  EM_VE = 251,     // NEC SX-Aurora VE
 };
 
 // Object file classes.
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -342,7 +342,7 @@
   /// createSjLjEHPreparePass - This pass adapts exception handling code to use
   /// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
/// - FunctionPass *createSjLjEHPreparePass(); + FunctionPass *createSjLjEHPreparePass(bool Use64BitsData = false); /// createWasmEHPass - This pass adapts exception handling code to use /// WebAssembly's exception handling scheme. diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h --- a/llvm/include/llvm/CodeGen/ValueTypes.h +++ b/llvm/include/llvm/CodeGen/ValueTypes.h @@ -203,6 +203,21 @@ return isSimple() ? V.is2048BitVector() : isExtended2048BitVector(); } + /// Return true if this is a 4096-bit vector type. + bool is4096BitVector() const { + return isSimple() ? V.is4096BitVector() : isExtended4096BitVector(); + } + + /// Return true if this is a 8192-bit vector type. + bool is8192BitVector() const { + return isSimple() ? V.is8192BitVector() : isExtended8192BitVector(); + } + + /// Return true if this is a 16384-bit vector type. + bool is16384BitVector() const { + return isSimple() ? V.is16384BitVector() : isExtended16384BitVector(); + } + /// Return true if this is an overloaded type for TableGen. bool isOverloaded() const { return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny); @@ -442,6 +457,9 @@ bool isExtended512BitVector() const LLVM_READONLY; bool isExtended1024BitVector() const LLVM_READONLY; bool isExtended2048BitVector() const LLVM_READONLY; + bool isExtended4096BitVector() const LLVM_READONLY; + bool isExtended8192BitVector() const LLVM_READONLY; + bool isExtended16384BitVector() const LLVM_READONLY; EVT getExtendedVectorElementType() const; unsigned getExtendedVectorNumElements() const LLVM_READONLY; TypeSize getExtendedSizeInBits() const LLVM_READONLY; diff --git a/llvm/include/llvm/CodeGen/ValueTypes.td b/llvm/include/llvm/CodeGen/ValueTypes.td --- a/llvm/include/llvm/CodeGen/ValueTypes.td +++ b/llvm/include/llvm/CodeGen/ValueTypes.td @@ -85,87 +85,95 @@ def v8i64 : ValueType<512, 60>; // 8 x i64 vector value def v16i64 : ValueType<1024,61>; // 16 x i64 vector value def v32i64 : ValueType<2048,62>; // 32 x i64 vector value - -def v1i128 : ValueType<128, 63>; // 1 x i128 vector value - -def v2f16 : ValueType<32 , 64>; // 2 x f16 vector value -def v3f16 : ValueType<48 , 65>; // 3 x f16 vector value -def v4f16 : ValueType<64 , 66>; // 4 x f16 vector value -def v8f16 : ValueType<128, 67>; // 8 x f16 vector value -def v16f16 : ValueType<256, 68>; // 8 x f16 vector value -def v32f16 : ValueType<512, 69>; // 8 x f16 vector value -def v1f32 : ValueType<32 , 70>; // 1 x f32 vector value -def v2f32 : ValueType<64 , 71>; // 2 x f32 vector value -def v3f32 : ValueType<96 , 72>; // 3 x f32 vector value -def v4f32 : ValueType<128, 73>; // 4 x f32 vector value -def v5f32 : ValueType<160, 74>; // 5 x f32 vector value -def v8f32 : ValueType<256, 75>; // 8 x f32 vector value -def v16f32 : ValueType<512, 76>; // 16 x f32 vector value -def v32f32 : ValueType<1024, 77>; // 32 x f32 vector value -def v64f32 : ValueType<2048, 78>; // 64 x f32 vector value -def v128f32 : ValueType<4096, 79>; // 128 x f32 vector value -def v256f32 : ValueType<8182, 80>; // 256 x f32 vector value -def v512f32 : ValueType<16384, 81>; // 512 x f32 vector value -def v1024f32 : ValueType<32768, 82>; // 1024 x f32 vector value -def v2048f32 : ValueType<65536, 83>; // 2048 x f32 vector value -def v1f64 : ValueType<64, 84>; // 1 x f64 vector value -def v2f64 : ValueType<128, 85>; // 2 x f64 vector value -def v4f64 : ValueType<256, 86>; // 4 x f64 vector value -def v8f64 : ValueType<512, 87>; // 8 x f64 vector value - -def nxv1i1 : ValueType<1, 88>; // n x 1 x 
i1 vector value -def nxv2i1 : ValueType<2, 89>; // n x 2 x i1 vector value -def nxv4i1 : ValueType<4, 90>; // n x 4 x i1 vector value -def nxv8i1 : ValueType<8, 91>; // n x 8 x i1 vector value -def nxv16i1 : ValueType<16, 92>; // n x 16 x i1 vector value -def nxv32i1 : ValueType<32, 93>; // n x 32 x i1 vector value - -def nxv1i8 : ValueType<8, 94>; // n x 1 x i8 vector value -def nxv2i8 : ValueType<16, 95>; // n x 2 x i8 vector value -def nxv4i8 : ValueType<32, 96>; // n x 4 x i8 vector value -def nxv8i8 : ValueType<64, 97>; // n x 8 x i8 vector value -def nxv16i8 : ValueType<128, 98>; // n x 16 x i8 vector value -def nxv32i8 : ValueType<256, 99>; // n x 32 x i8 vector value - -def nxv1i16 : ValueType<16, 100>; // n x 1 x i16 vector value -def nxv2i16 : ValueType<32, 101>; // n x 2 x i16 vector value -def nxv4i16 : ValueType<64, 102>; // n x 4 x i16 vector value -def nxv8i16 : ValueType<128, 103>; // n x 8 x i16 vector value -def nxv16i16: ValueType<256, 104>; // n x 16 x i16 vector value -def nxv32i16: ValueType<512, 105>; // n x 32 x i16 vector value - -def nxv1i32 : ValueType<32, 106>; // n x 1 x i32 vector value -def nxv2i32 : ValueType<64, 107>; // n x 2 x i32 vector value -def nxv4i32 : ValueType<128, 108>; // n x 4 x i32 vector value -def nxv8i32 : ValueType<256, 109>; // n x 8 x i32 vector value -def nxv16i32: ValueType<512, 110>; // n x 16 x i32 vector value -def nxv32i32: ValueType<1024,111>; // n x 32 x i32 vector value - -def nxv1i64 : ValueType<64, 112>; // n x 1 x i64 vector value -def nxv2i64 : ValueType<128, 113>; // n x 2 x i64 vector value -def nxv4i64 : ValueType<256, 114>; // n x 4 x i64 vector value -def nxv8i64 : ValueType<512, 115>; // n x 8 x i64 vector value -def nxv16i64: ValueType<1024,116>; // n x 16 x i64 vector value -def nxv32i64: ValueType<2048,117>; // n x 32 x i64 vector value - -def nxv2f16 : ValueType<32 , 118>; // n x 2 x f16 vector value -def nxv4f16 : ValueType<64 , 119>; // n x 4 x f16 vector value -def nxv8f16 : ValueType<128, 120>; // n x 8 x f16 vector value -def nxv1f32 : ValueType<32 , 121>; // n x 1 x f32 vector value -def nxv2f32 : ValueType<64 , 122>; // n x 2 x f32 vector value -def nxv4f32 : ValueType<128, 123>; // n x 4 x f32 vector value -def nxv8f32 : ValueType<256, 124>; // n x 8 x f32 vector value -def nxv16f32 : ValueType<512, 125>; // n x 16 x f32 vector value -def nxv1f64 : ValueType<64, 126>; // n x 1 x f64 vector value -def nxv2f64 : ValueType<128, 127>; // n x 2 x f64 vector value -def nxv4f64 : ValueType<256, 128>; // n x 4 x f64 vector value -def nxv8f64 : ValueType<512, 129>; // n x 8 x f64 vector value - -def x86mmx : ValueType<64 , 130>; // X86 MMX value -def FlagVT : ValueType<0 , 131>; // Pre-RA sched glue -def isVoid : ValueType<0 , 132>; // Produces no value -def untyped: ValueType<8 , 133>; // Produces an untyped value -def exnref: ValueType<0, 134>; // WebAssembly's exnref type +def v64i64 : ValueType<4096,63>; // 64 x i64 vector value +def v128i64: ValueType<8192,64>; // 128 x i64 vector value +def v256i64: ValueType<16384,65>; // 256 x i64 vector value + +def v1i128 : ValueType<128, 66>; // 1 x i128 vector value + +def v2f16 : ValueType<32 , 67>; // 2 x f16 vector value +def v3f16 : ValueType<48 , 68>; // 3 x f16 vector value +def v4f16 : ValueType<64 , 69>; // 4 x f16 vector value +def v8f16 : ValueType<128, 70>; // 8 x f16 vector value +def v16f16 : ValueType<256, 71>; // 8 x f16 vector value +def v32f16 : ValueType<512, 72>; // 8 x f16 vector value +def v1f32 : ValueType<32 , 73>; // 1 x f32 vector value +def 
v2f32 : ValueType<64 , 74>; // 2 x f32 vector value +def v3f32 : ValueType<96 , 75>; // 3 x f32 vector value +def v4f32 : ValueType<128, 76>; // 4 x f32 vector value +def v5f32 : ValueType<160, 77>; // 5 x f32 vector value +def v8f32 : ValueType<256, 78>; // 8 x f32 vector value +def v16f32 : ValueType<512, 79>; // 16 x f32 vector value +def v32f32 : ValueType<1024, 80>; // 32 x f32 vector value +def v64f32 : ValueType<2048, 81>; // 64 x f32 vector value +def v128f32 : ValueType<4096, 82>; // 128 x f32 vector value +def v256f32 : ValueType<8182, 83>; // 256 x f32 vector value +def v512f32 : ValueType<16384, 84>; // 512 x f32 vector value +def v1024f32 : ValueType<32768, 85>; // 1024 x f32 vector value +def v2048f32 : ValueType<65536, 86>; // 2048 x f32 vector value +def v1f64 : ValueType<64, 87>; // 1 x f64 vector value +def v2f64 : ValueType<128, 88>; // 2 x f64 vector value +def v4f64 : ValueType<256, 89>; // 4 x f64 vector value +def v8f64 : ValueType<512, 90>; // 8 x f64 vector value +def v16f64 : ValueType<1024,91>; // 16 x f64 vector value +def v32f64 : ValueType<2048,92>; // 32 x f64 vector value +def v64f64 : ValueType<4096,93>; // 64 x f64 vector value +def v128f64 : ValueType<8192,94>; // 128 x f64 vector value +def v256f64 : ValueType<16384,95>; // 256 x f64 vector value + +def nxv1i1 : ValueType<1, 96>; // n x 1 x i1 vector value +def nxv2i1 : ValueType<2, 97>; // n x 2 x i1 vector value +def nxv4i1 : ValueType<4, 98>; // n x 4 x i1 vector value +def nxv8i1 : ValueType<8, 99>; // n x 8 x i1 vector value +def nxv16i1 : ValueType<16, 100>; // n x 16 x i1 vector value +def nxv32i1 : ValueType<32, 101>; // n x 32 x i1 vector value + +def nxv1i8 : ValueType<8, 102>; // n x 1 x i8 vector value +def nxv2i8 : ValueType<16, 103>; // n x 2 x i8 vector value +def nxv4i8 : ValueType<32, 104>; // n x 4 x i8 vector value +def nxv8i8 : ValueType<64, 105>; // n x 8 x i8 vector value +def nxv16i8 : ValueType<128, 106>; // n x 16 x i8 vector value +def nxv32i8 : ValueType<256, 107>; // n x 32 x i8 vector value + +def nxv1i16 : ValueType<16, 108>; // n x 1 x i16 vector value +def nxv2i16 : ValueType<32, 109>; // n x 2 x i16 vector value +def nxv4i16 : ValueType<64, 110>; // n x 4 x i16 vector value +def nxv8i16 : ValueType<128, 111>; // n x 8 x i16 vector value +def nxv16i16: ValueType<256, 112>; // n x 16 x i16 vector value +def nxv32i16: ValueType<512, 113>; // n x 32 x i16 vector value + +def nxv1i32 : ValueType<32, 114>; // n x 1 x i32 vector value +def nxv2i32 : ValueType<64, 115>; // n x 2 x i32 vector value +def nxv4i32 : ValueType<128, 116>; // n x 4 x i32 vector value +def nxv8i32 : ValueType<256, 117>; // n x 8 x i32 vector value +def nxv16i32: ValueType<512, 118>; // n x 16 x i32 vector value +def nxv32i32: ValueType<1024,119>; // n x 32 x i32 vector value + +def nxv1i64 : ValueType<64, 120>; // n x 1 x i64 vector value +def nxv2i64 : ValueType<128, 121>; // n x 2 x i64 vector value +def nxv4i64 : ValueType<256, 122>; // n x 4 x i64 vector value +def nxv8i64 : ValueType<512, 123>; // n x 8 x i64 vector value +def nxv16i64: ValueType<1024,124>; // n x 16 x i64 vector value +def nxv32i64: ValueType<2048,125>; // n x 32 x i64 vector value + +def nxv2f16 : ValueType<32 , 126>; // n x 2 x f16 vector value +def nxv4f16 : ValueType<64 , 127>; // n x 4 x f16 vector value +def nxv8f16 : ValueType<128, 128>; // n x 8 x f16 vector value +def nxv1f32 : ValueType<32 , 129>; // n x 1 x f32 vector value +def nxv2f32 : ValueType<64 , 130>; // n x 2 x f32 vector value +def nxv4f32 : ValueType<128, 
131>; // n x 4 x f32 vector value +def nxv8f32 : ValueType<256, 132>; // n x 8 x f32 vector value +def nxv16f32 : ValueType<512, 133>; // n x 16 x f32 vector value +def nxv1f64 : ValueType<64, 134>; // n x 1 x f64 vector value +def nxv2f64 : ValueType<128, 135>; // n x 2 x f64 vector value +def nxv4f64 : ValueType<256, 136>; // n x 4 x f64 vector value +def nxv8f64 : ValueType<512, 137>; // n x 8 x f64 vector value + +def x86mmx : ValueType<64 , 138>; // X86 MMX value +def FlagVT : ValueType<0 , 139>; // Pre-RA sched glue +def isVoid : ValueType<0 , 140>; // Produces no value +def untyped: ValueType<8 , 141>; // Produces an untyped value +def exnref: ValueType<0, 142>; // WebAssembly's exnref type def token : ValueType<0 , 248>; // TokenTy def MetadataVT: ValueType<0, 249>; // Metadata diff --git a/llvm/include/llvm/IR/CallingConv.h b/llvm/include/llvm/IR/CallingConv.h --- a/llvm/include/llvm/IR/CallingConv.h +++ b/llvm/include/llvm/IR/CallingConv.h @@ -241,6 +241,14 @@ /// The remainder matches the regular calling convention. WASM_EmscriptenInvoke = 99, + /// Calling convention used for NEC SX-Aurora VE vec_expf intrinsic + /// function. + VE_VEC_EXPF = 100, + + /// Calling convention used for NEC SX-Aurora VE llvm_grow_stack intrinsic + /// function. + VE_LLVM_GROW_STACK = 101, + /// The highest possible calling convention ID. Must be some 2^k - 1. MaxID = 1023 }; diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -232,6 +232,7 @@ def llvm_v16i1_ty : LLVMType; // 16 x i1 def llvm_v32i1_ty : LLVMType; // 32 x i1 def llvm_v64i1_ty : LLVMType; // 64 x i1 +def llvm_v256i1_ty : LLVMType; // 256 x i1 def llvm_v512i1_ty : LLVMType; // 512 x i1 def llvm_v1024i1_ty : LLVMType; //1024 x i1 @@ -1306,3 +1307,4 @@ include "llvm/IR/IntrinsicsSystemZ.td" include "llvm/IR/IntrinsicsWebAssembly.td" include "llvm/IR/IntrinsicsRISCV.td" +include "llvm/IR/IntrinsicsVE.td" diff --git a/llvm/include/llvm/IR/IntrinsicsVE.td b/llvm/include/llvm/IR/IntrinsicsVE.td new file mode 100644 --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsVE.td @@ -0,0 +1,32 @@ +let TargetPrefix = "ve" in { + + // fencem instructions + def int_ve_fencem1 : Intrinsic<[], [], []>; + def int_ve_fencem2 : Intrinsic<[], [], []>; + def int_ve_fencem3 : Intrinsic<[], [], []>; + + // VEL Intrinsics + def int_ve_vl_svob : GCCBuiltin<"__builtin_ve_vl_svob">, Intrinsic<[], [], [IntrHasSideEffects]>; + + def int_ve_vl_pack_f32p : GCCBuiltin<"__builtin_ve_vl_pack_f32p">, + Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty], [IntrReadMem]>; + + def int_ve_vl_pack_f32a : GCCBuiltin<"__builtin_ve_vl_pack_f32a">, + Intrinsic<[llvm_i64_ty], [llvm_ptr_ty], [IntrReadMem]>; + + def int_ve_vl_extract_vm512u : GCCBuiltin<"__builtin_ve_vl_extract_vm512u">, + Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; + + def int_ve_vl_extract_vm512l : GCCBuiltin<"__builtin_ve_vl_extract_vm512l">, + Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; + + def int_ve_vl_insert_vm512u : GCCBuiltin<"__builtin_ve_vl_insert_vm512u">, + Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; + + def int_ve_vl_insert_vm512l : GCCBuiltin<"__builtin_ve_vl_insert_vm512l">, + Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; + +} + +include "llvm/IR/IntrinsicsVEVL.gen.td" + diff --git a/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td new file mode 100644 --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsVEVL.gen.td 
@@ -0,0 +1,1305 @@ +let TargetPrefix = "ve" in def int_ve_vl_vld_vssl : GCCBuiltin<"__builtin_ve_vl_vld_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vld_vssvl : GCCBuiltin<"__builtin_ve_vl_vld_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldnc_vssl : GCCBuiltin<"__builtin_ve_vl_vldnc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldnc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldu_vssl : GCCBuiltin<"__builtin_ve_vl_vldu_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldu_vssvl : GCCBuiltin<"__builtin_ve_vl_vldu_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldunc_vssl : GCCBuiltin<"__builtin_ve_vl_vldunc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldunc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldunc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlsx_vssl : GCCBuiltin<"__builtin_ve_vl_vldlsx_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlsx_vssvl : GCCBuiltin<"__builtin_ve_vl_vldlsx_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlsxnc_vssl : GCCBuiltin<"__builtin_ve_vl_vldlsxnc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlsxnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldlsxnc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlzx_vssl : GCCBuiltin<"__builtin_ve_vl_vldlzx_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlzx_vssvl : GCCBuiltin<"__builtin_ve_vl_vldlzx_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlzxnc_vssl : GCCBuiltin<"__builtin_ve_vl_vldlzxnc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldlzxnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldlzxnc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vld2d_vssl : GCCBuiltin<"__builtin_ve_vl_vld2d_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vld2d_vssvl : GCCBuiltin<"__builtin_ve_vl_vld2d_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vld2dnc_vssl : GCCBuiltin<"__builtin_ve_vl_vld2dnc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vld2dnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vld2dnc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], 
[IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldu2d_vssl : GCCBuiltin<"__builtin_ve_vl_vldu2d_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldu2d_vssvl : GCCBuiltin<"__builtin_ve_vl_vldu2d_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldu2dnc_vssl : GCCBuiltin<"__builtin_ve_vl_vldu2dnc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldu2dnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldu2dnc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dsx_vssl : GCCBuiltin<"__builtin_ve_vl_vldl2dsx_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dsx_vssvl : GCCBuiltin<"__builtin_ve_vl_vldl2dsx_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dsxnc_vssl : GCCBuiltin<"__builtin_ve_vl_vldl2dsxnc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dsxnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldl2dsxnc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dzx_vssl : GCCBuiltin<"__builtin_ve_vl_vldl2dzx_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dzx_vssvl : GCCBuiltin<"__builtin_ve_vl_vldl2dzx_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dzxnc_vssl : GCCBuiltin<"__builtin_ve_vl_vldl2dzxnc_vssl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vldl2dzxnc_vssvl : GCCBuiltin<"__builtin_ve_vl_vldl2dzxnc_vssvl">, Intrinsic<[LLVMType], [LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst_vssl : GCCBuiltin<"__builtin_ve_vl_vst_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst_vssml : GCCBuiltin<"__builtin_ve_vl_vst_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstnc_vssl : GCCBuiltin<"__builtin_ve_vl_vstnc_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstnc_vssml : GCCBuiltin<"__builtin_ve_vl_vstnc_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstot_vssl : GCCBuiltin<"__builtin_ve_vl_vstot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstot_vssml : GCCBuiltin<"__builtin_ve_vl_vstot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstncot_vssl : GCCBuiltin<"__builtin_ve_vl_vstncot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstncot_vssml : GCCBuiltin<"__builtin_ve_vl_vstncot_vssml">, 
Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu_vssl : GCCBuiltin<"__builtin_ve_vl_vstu_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu_vssml : GCCBuiltin<"__builtin_ve_vl_vstu_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstunc_vssl : GCCBuiltin<"__builtin_ve_vl_vstunc_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstunc_vssml : GCCBuiltin<"__builtin_ve_vl_vstunc_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstuot_vssl : GCCBuiltin<"__builtin_ve_vl_vstuot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstuot_vssml : GCCBuiltin<"__builtin_ve_vl_vstuot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstuncot_vssl : GCCBuiltin<"__builtin_ve_vl_vstuncot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstuncot_vssml : GCCBuiltin<"__builtin_ve_vl_vstuncot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl_vssl : GCCBuiltin<"__builtin_ve_vl_vstl_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl_vssml : GCCBuiltin<"__builtin_ve_vl_vstl_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstlnc_vssl : GCCBuiltin<"__builtin_ve_vl_vstlnc_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstlnc_vssml : GCCBuiltin<"__builtin_ve_vl_vstlnc_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstlot_vssl : GCCBuiltin<"__builtin_ve_vl_vstlot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstlot_vssml : GCCBuiltin<"__builtin_ve_vl_vstlot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstlncot_vssl : GCCBuiltin<"__builtin_ve_vl_vstlncot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstlncot_vssml : GCCBuiltin<"__builtin_ve_vl_vstlncot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst2d_vssl : GCCBuiltin<"__builtin_ve_vl_vst2d_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst2d_vssml : GCCBuiltin<"__builtin_ve_vl_vst2d_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst2dnc_vssl : GCCBuiltin<"__builtin_ve_vl_vst2dnc_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_vst2dnc_vssml : GCCBuiltin<"__builtin_ve_vl_vst2dnc_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst2dot_vssl : GCCBuiltin<"__builtin_ve_vl_vst2dot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst2dot_vssml : GCCBuiltin<"__builtin_ve_vl_vst2dot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst2dncot_vssl : GCCBuiltin<"__builtin_ve_vl_vst2dncot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vst2dncot_vssml : GCCBuiltin<"__builtin_ve_vl_vst2dncot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2d_vssl : GCCBuiltin<"__builtin_ve_vl_vstu2d_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2d_vssml : GCCBuiltin<"__builtin_ve_vl_vstu2d_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2dnc_vssl : GCCBuiltin<"__builtin_ve_vl_vstu2dnc_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2dnc_vssml : GCCBuiltin<"__builtin_ve_vl_vstu2dnc_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2dot_vssl : GCCBuiltin<"__builtin_ve_vl_vstu2dot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2dot_vssml : GCCBuiltin<"__builtin_ve_vl_vstu2dot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2dncot_vssl : GCCBuiltin<"__builtin_ve_vl_vstu2dncot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstu2dncot_vssml : GCCBuiltin<"__builtin_ve_vl_vstu2dncot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2d_vssl : GCCBuiltin<"__builtin_ve_vl_vstl2d_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2d_vssml : GCCBuiltin<"__builtin_ve_vl_vstl2d_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2dnc_vssl : GCCBuiltin<"__builtin_ve_vl_vstl2dnc_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2dnc_vssml : GCCBuiltin<"__builtin_ve_vl_vstl2dnc_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2dot_vssl : GCCBuiltin<"__builtin_ve_vl_vstl2dot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2dot_vssml : GCCBuiltin<"__builtin_ve_vl_vstl2dot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2dncot_vssl : 
GCCBuiltin<"__builtin_ve_vl_vstl2dncot_vssl">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vstl2dncot_vssml : GCCBuiltin<"__builtin_ve_vl_vstl2dncot_vssml">, Intrinsic<[], [LLVMType, LLVMType, llvm_ptr_ty, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pfchv_ssl : GCCBuiltin<"__builtin_ve_vl_pfchv_ssl">, Intrinsic<[], [LLVMType, llvm_ptr_ty, LLVMType], [IntrInaccessibleMemOrArgMemOnly]>; +let TargetPrefix = "ve" in def int_ve_vl_pfchvnc_ssl : GCCBuiltin<"__builtin_ve_vl_pfchvnc_ssl">, Intrinsic<[], [LLVMType, llvm_ptr_ty, LLVMType], [IntrInaccessibleMemOrArgMemOnly]>; +let TargetPrefix = "ve" in def int_ve_vl_lsv_vvss : GCCBuiltin<"__builtin_ve_vl_lsv_vvss">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_lvsl_svs : GCCBuiltin<"__builtin_ve_vl_lvsl_svs">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_lvsd_svs : GCCBuiltin<"__builtin_ve_vl_lvsd_svs">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_lvss_svs : GCCBuiltin<"__builtin_ve_vl_lvss_svs">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_lvm_mmss : GCCBuiltin<"__builtin_ve_vl_lvm_mmss">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_lvm_MMss : GCCBuiltin<"__builtin_ve_vl_lvm_MMss">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_svm_sms : GCCBuiltin<"__builtin_ve_vl_svm_sms">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_svm_sMs : GCCBuiltin<"__builtin_ve_vl_svm_sMs">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdd_vsl : GCCBuiltin<"__builtin_ve_vl_vbrdd_vsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdd_vsvl : GCCBuiltin<"__builtin_ve_vl_vbrdd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdd_vsmvl : GCCBuiltin<"__builtin_ve_vl_vbrdd_vsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdl_vsl : GCCBuiltin<"__builtin_ve_vl_vbrdl_vsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdl_vsvl : GCCBuiltin<"__builtin_ve_vl_vbrdl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdl_vsmvl : GCCBuiltin<"__builtin_ve_vl_vbrdl_vsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrds_vsl : GCCBuiltin<"__builtin_ve_vl_vbrds_vsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrds_vsvl : GCCBuiltin<"__builtin_ve_vl_vbrds_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrds_vsmvl : GCCBuiltin<"__builtin_ve_vl_vbrds_vsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdw_vsl : GCCBuiltin<"__builtin_ve_vl_vbrdw_vsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; 
+let TargetPrefix = "ve" in def int_ve_vl_vbrdw_vsvl : GCCBuiltin<"__builtin_ve_vl_vbrdw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vbrdw_vsmvl : GCCBuiltin<"__builtin_ve_vl_vbrdw_vsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvbrd_vsl : GCCBuiltin<"__builtin_ve_vl_pvbrd_vsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvbrd_vsvl : GCCBuiltin<"__builtin_ve_vl_pvbrd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvbrd_vsMvl : GCCBuiltin<"__builtin_ve_vl_pvbrd_vsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmv_vsvl : GCCBuiltin<"__builtin_ve_vl_vmv_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmv_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmv_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmv_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmv_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddul_vvvl : GCCBuiltin<"__builtin_ve_vl_vaddul_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vaddul_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddul_vsvl : GCCBuiltin<"__builtin_ve_vl_vaddul_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vaddul_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vaddul_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vaddul_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vadduw_vvvl : GCCBuiltin<"__builtin_ve_vl_vadduw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vadduw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vadduw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vadduw_vsvl : GCCBuiltin<"__builtin_ve_vl_vadduw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vadduw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vadduw_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vadduw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vadduw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vadduw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vadduw_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_pvaddu_vvvl : GCCBuiltin<"__builtin_ve_vl_pvaddu_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvaddu_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vsvl : GCCBuiltin<"__builtin_ve_vl_pvaddu_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvaddu_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvaddu_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvaddu_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vaddswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vaddswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vaddswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vaddswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vaddswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vaddswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vaddswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vaddswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vaddswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vaddswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vaddswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vaddswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvadds_vvvl : GCCBuiltin<"__builtin_ve_vl_pvadds_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvadds_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvadds_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, 
LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvadds_vsvl : GCCBuiltin<"__builtin_ve_vl_pvadds_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvadds_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvadds_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvadds_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvadds_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvadds_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvadds_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vaddsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vaddsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vaddsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vaddsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vaddsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vaddsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubul_vvvl : GCCBuiltin<"__builtin_ve_vl_vsubul_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsubul_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubul_vsvl : GCCBuiltin<"__builtin_ve_vl_vsubul_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vsubul_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsubul_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vsubul_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vvvl : GCCBuiltin<"__builtin_ve_vl_vsubuw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsubuw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vsvl : GCCBuiltin<"__builtin_ve_vl_vsubuw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vsubuw_vsvvl">, Intrinsic<[LLVMType], 
[LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsubuw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vsubuw_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsubu_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsubu_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vsvl : GCCBuiltin<"__builtin_ve_vl_pvsubu_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvsubu_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsubu_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvsubu_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vsubswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsubswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vsubswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vsubswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsubswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vsubswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vsubswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsubswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vsubswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vsubswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsubswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_vsubswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vsubswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsubs_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsubs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vsvl : GCCBuiltin<"__builtin_ve_vl_pvsubs_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvsubs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsubs_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvsubs_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vsubsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulul_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; 
+let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmuluw_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vsvl">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmulsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vvvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vsvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmulslw_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsl : 
GCCBuiltin<"__builtin_ve_vl_vdivul_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivul_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivuw_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivswsx_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivswzx_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vdivsl_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpul_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vvvl">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpuw_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmpu_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_vcmpswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvcmps_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vcmpsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvmaxs_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vminswsx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvmvl : 
GCCBuiltin<"__builtin_ve_vl_vminswsx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vminswzx_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvmins_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmaxsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let 
TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vminsl_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vand_vvvl : GCCBuiltin<"__builtin_ve_vl_vand_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vand_vvvvl : GCCBuiltin<"__builtin_ve_vl_vand_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vand_vsvl : GCCBuiltin<"__builtin_ve_vl_vand_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vand_vsvvl : GCCBuiltin<"__builtin_ve_vl_vand_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vand_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vand_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vand_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vand_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandlo_vvvl : GCCBuiltin<"__builtin_ve_vl_pvandlo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandlo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvandlo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandlo_vsvl : GCCBuiltin<"__builtin_ve_vl_pvandlo_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandlo_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvandlo_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandlo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvandlo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandlo_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvandlo_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandup_vvvl : GCCBuiltin<"__builtin_ve_vl_pvandup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvandup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandup_vsvl : GCCBuiltin<"__builtin_ve_vl_pvandup_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandup_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvandup_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvandup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvandup_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvandup_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvl : GCCBuiltin<"__builtin_ve_vl_pvand_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvand_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvl : GCCBuiltin<"__builtin_ve_vl_pvand_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvand_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvand_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvand_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vor_vvvl : GCCBuiltin<"__builtin_ve_vl_vor_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vor_vvvvl : GCCBuiltin<"__builtin_ve_vl_vor_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vor_vsvl : GCCBuiltin<"__builtin_ve_vl_vor_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vor_vsvvl : GCCBuiltin<"__builtin_ve_vl_vor_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vor_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vor_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vor_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vor_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorlo_vvvl : GCCBuiltin<"__builtin_ve_vl_pvorlo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorlo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvorlo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorlo_vsvl : GCCBuiltin<"__builtin_ve_vl_pvorlo_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorlo_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvorlo_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorlo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvorlo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorlo_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvorlo_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorup_vvvl : GCCBuiltin<"__builtin_ve_vl_pvorup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvorup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorup_vsvl : GCCBuiltin<"__builtin_ve_vl_pvorup_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorup_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvorup_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvorup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvorup_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvorup_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvl : GCCBuiltin<"__builtin_ve_vl_pvor_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvor_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvl : GCCBuiltin<"__builtin_ve_vl_pvor_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvor_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvor_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvor_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvl : GCCBuiltin<"__builtin_ve_vl_vxor_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvvl : GCCBuiltin<"__builtin_ve_vl_vxor_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvl : GCCBuiltin<"__builtin_ve_vl_vxor_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvvl : GCCBuiltin<"__builtin_ve_vl_vxor_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vxor_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vxor_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorlo_vvvl : GCCBuiltin<"__builtin_ve_vl_pvxorlo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let 
TargetPrefix = "ve" in def int_ve_vl_pvxorlo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvxorlo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorlo_vsvl : GCCBuiltin<"__builtin_ve_vl_pvxorlo_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorlo_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvxorlo_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorlo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvxorlo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorlo_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvxorlo_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorup_vvvl : GCCBuiltin<"__builtin_ve_vl_pvxorup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvxorup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorup_vsvl : GCCBuiltin<"__builtin_ve_vl_pvxorup_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorup_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvxorup_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvxorup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxorup_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvxorup_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvxor_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_veqv_vvvl : GCCBuiltin<"__builtin_ve_vl_veqv_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_veqv_vvvvl : GCCBuiltin<"__builtin_ve_vl_veqv_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_veqv_vsvl : GCCBuiltin<"__builtin_ve_vl_veqv_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_veqv_vsvvl : GCCBuiltin<"__builtin_ve_vl_veqv_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_veqv_vvvmvl : GCCBuiltin<"__builtin_ve_vl_veqv_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_veqv_vsvmvl : GCCBuiltin<"__builtin_ve_vl_veqv_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvlo_vvvl : GCCBuiltin<"__builtin_ve_vl_pveqvlo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvlo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pveqvlo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvlo_vsvl : GCCBuiltin<"__builtin_ve_vl_pveqvlo_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvlo_vsvvl : GCCBuiltin<"__builtin_ve_vl_pveqvlo_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvlo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pveqvlo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvlo_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pveqvlo_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvup_vvvl : GCCBuiltin<"__builtin_ve_vl_pveqvup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pveqvup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvup_vsvl : GCCBuiltin<"__builtin_ve_vl_pveqvup_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvup_vsvvl : GCCBuiltin<"__builtin_ve_vl_pveqvup_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pveqvup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqvup_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pveqvup_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqv_vvvl : GCCBuiltin<"__builtin_ve_vl_pveqv_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqv_vvvvl : GCCBuiltin<"__builtin_ve_vl_pveqv_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqv_vsvl : GCCBuiltin<"__builtin_ve_vl_pveqv_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqv_vsvvl : GCCBuiltin<"__builtin_ve_vl_pveqv_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqv_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pveqv_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, 
LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pveqv_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pveqv_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vseq_vl : GCCBuiltin<"__builtin_ve_vl_vseq_vl">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vseq_vvl : GCCBuiltin<"__builtin_ve_vl_vseq_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvseqlo_vl : GCCBuiltin<"__builtin_ve_vl_pvseqlo_vl">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvseqlo_vvl : GCCBuiltin<"__builtin_ve_vl_pvseqlo_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsequp_vl : GCCBuiltin<"__builtin_ve_vl_pvsequp_vl">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsequp_vvl : GCCBuiltin<"__builtin_ve_vl_pvsequp_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvseq_vl : GCCBuiltin<"__builtin_ve_vl_pvseq_vl">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvseq_vvl : GCCBuiltin<"__builtin_ve_vl_pvseq_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsl : GCCBuiltin<"__builtin_ve_vl_vsll_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsll_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslllo_vvvl : GCCBuiltin<"__builtin_ve_vl_pvslllo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslllo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvslllo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslllo_vvsl : GCCBuiltin<"__builtin_ve_vl_pvslllo_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslllo_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvslllo_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslllo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvslllo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslllo_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvslllo_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsllup_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsllup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsllup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsllup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsllup_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsllup_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsllup_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsllup_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsllup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsllup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsllup_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsllup_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsll_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsrl_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrllo_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsrllo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrllo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsrllo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrllo_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsrllo_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrllo_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsrllo_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrllo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsrllo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrllo_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsrllo_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrlup_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsrlup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrlup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsrlup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrlup_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsrlup_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrlup_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsrlup_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrlup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsrlup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrlup_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsrlup_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsrl_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslaw_vvvl : GCCBuiltin<"__builtin_ve_vl_vslaw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslaw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vslaw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslaw_vvsl : GCCBuiltin<"__builtin_ve_vl_vslaw_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslaw_vvsvl : GCCBuiltin<"__builtin_ve_vl_vslaw_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, 
LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslaw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vslaw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslaw_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vslaw_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslalo_vvvl : GCCBuiltin<"__builtin_ve_vl_pvslalo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslalo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvslalo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslalo_vvsl : GCCBuiltin<"__builtin_ve_vl_pvslalo_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslalo_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvslalo_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslalo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvslalo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslalo_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvslalo_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslaup_vvvl : GCCBuiltin<"__builtin_ve_vl_pvslaup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslaup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvslaup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslaup_vvsl : GCCBuiltin<"__builtin_ve_vl_pvslaup_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslaup_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvslaup_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslaup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvslaup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvslaup_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvslaup_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsla_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsMvl : 
GCCBuiltin<"__builtin_ve_vl_pvsla_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsl : GCCBuiltin<"__builtin_ve_vl_vslal_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vslal_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsraw_vvvl : GCCBuiltin<"__builtin_ve_vl_vsraw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsraw_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsraw_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsraw_vvsl : GCCBuiltin<"__builtin_ve_vl_vsraw_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsraw_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsraw_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsraw_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsraw_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsraw_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsraw_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsralo_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsralo_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsralo_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsralo_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsralo_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsralo_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsralo_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsralo_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsralo_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsralo_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsralo_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsralo_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsraup_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsraup_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_pvsraup_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsraup_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsraup_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsraup_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsraup_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsraup_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsraup_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsraup_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsraup_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsraup_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsMvl : GCCBuiltin<"__builtin_ve_vl_pvsra_vvsMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsl : GCCBuiltin<"__builtin_ve_vl_vsral_vvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsmvl : GCCBuiltin<"__builtin_ve_vl_vsral_vvsmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssl : GCCBuiltin<"__builtin_ve_vl_vsfa_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssvl : GCCBuiltin<"__builtin_ve_vl_vsfa_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vsfa_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, 
LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vvvl : GCCBuiltin<"__builtin_ve_vl_vfaddd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfaddd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vsvl : GCCBuiltin<"__builtin_ve_vl_vfaddd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfaddd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfaddd_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfaddd_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfadds_vvvl : GCCBuiltin<"__builtin_ve_vl_vfadds_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfadds_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfadds_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfadds_vsvl : GCCBuiltin<"__builtin_ve_vl_vfadds_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfadds_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfadds_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfadds_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfadds_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfadds_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfadds_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vvvl : GCCBuiltin<"__builtin_ve_vl_pvfadd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfadd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vsvl : GCCBuiltin<"__builtin_ve_vl_pvfadd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfadd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfadd_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfadd_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vvvl : GCCBuiltin<"__builtin_ve_vl_vfsubd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfsubd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, 
LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vsvl : GCCBuiltin<"__builtin_ve_vl_vfsubd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfsubd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfsubd_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfsubd_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vvvl : GCCBuiltin<"__builtin_ve_vl_vfsubs_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfsubs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vsvl : GCCBuiltin<"__builtin_ve_vl_vfsubs_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfsubs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfsubs_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfsubs_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vvvl : GCCBuiltin<"__builtin_ve_vl_pvfsub_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfsub_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vsvl : GCCBuiltin<"__builtin_ve_vl_pvfsub_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfsub_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfsub_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfsub_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vvvl : GCCBuiltin<"__builtin_ve_vl_vfmuld_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmuld_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vsvl : GCCBuiltin<"__builtin_ve_vl_vfmuld_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmuld_vsvvl">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmuld_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmuld_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vvvl : GCCBuiltin<"__builtin_ve_vl_vfmuls_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmuls_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vsvl : GCCBuiltin<"__builtin_ve_vl_vfmuls_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmuls_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmuls_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmuls_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vvvl : GCCBuiltin<"__builtin_ve_vl_pvfmul_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmul_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vsvl : GCCBuiltin<"__builtin_ve_vl_pvfmul_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfmul_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmul_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmul_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vvvl : GCCBuiltin<"__builtin_ve_vl_vfdivd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfdivd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vsvl : GCCBuiltin<"__builtin_ve_vl_vfdivd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfdivd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfdivd_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vsvmvl : 
GCCBuiltin<"__builtin_ve_vl_vfdivd_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vvvl : GCCBuiltin<"__builtin_ve_vl_vfdivs_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfdivs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vsvl : GCCBuiltin<"__builtin_ve_vl_vfdivs_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfdivs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfdivs_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfdivs_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsqrtd_vvl : GCCBuiltin<"__builtin_ve_vl_vfsqrtd_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsqrtd_vvvl : GCCBuiltin<"__builtin_ve_vl_vfsqrtd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsqrts_vvl : GCCBuiltin<"__builtin_ve_vl_vfsqrts_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsqrts_vvvl : GCCBuiltin<"__builtin_ve_vl_vfsqrts_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vvvl : GCCBuiltin<"__builtin_ve_vl_vfcmpd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfcmpd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vsvl : GCCBuiltin<"__builtin_ve_vl_vfcmpd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfcmpd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfcmpd_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfcmpd_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vvvl : GCCBuiltin<"__builtin_ve_vl_vfcmps_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfcmps_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vsvl : GCCBuiltin<"__builtin_ve_vl_vfcmps_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfcmps_vsvvl">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfcmps_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfcmps_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vvvl : GCCBuiltin<"__builtin_ve_vl_pvfcmp_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfcmp_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vsvl : GCCBuiltin<"__builtin_ve_vl_pvfcmp_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfcmp_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfcmp_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfcmp_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vvvl : GCCBuiltin<"__builtin_ve_vl_vfmaxd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmaxd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vsvl : GCCBuiltin<"__builtin_ve_vl_vfmaxd_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmaxd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmaxd_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmaxd_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vvvl : GCCBuiltin<"__builtin_ve_vl_vfmaxs_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmaxs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vsvl : GCCBuiltin<"__builtin_ve_vl_vfmaxs_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmaxs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmaxs_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vsvmvl : 
GCCBuiltin<"__builtin_ve_vl_vfmaxs_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vvvl : GCCBuiltin<"__builtin_ve_vl_pvfmax_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmax_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vsvl : GCCBuiltin<"__builtin_ve_vl_pvfmax_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfmax_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmax_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmax_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmind_vvvl : GCCBuiltin<"__builtin_ve_vl_vfmind_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmind_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmind_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmind_vsvl : GCCBuiltin<"__builtin_ve_vl_vfmind_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmind_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmind_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmind_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmind_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmind_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmind_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmins_vvvl : GCCBuiltin<"__builtin_ve_vl_vfmins_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmins_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmins_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmins_vsvl : GCCBuiltin<"__builtin_ve_vl_vfmins_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmins_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmins_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmins_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmins_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmins_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmins_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vvvl : GCCBuiltin<"__builtin_ve_vl_pvfmin_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_pvfmin_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmin_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vsvl : GCCBuiltin<"__builtin_ve_vl_pvfmin_vsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfmin_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmin_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmin_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmadd_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_vfmads_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmads_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vsvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvsvvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vvvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vsvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vsvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmad_vvsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, 
LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmsbd_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfmsbs_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vsvvvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvsvvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvvvMvl : 
GCCBuiltin<"__builtin_ve_vl_pvfmsb_vvvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vsvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vsvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfmsb_vvsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmadd_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, 
LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmads_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvvvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vsvvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvsvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vvvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vsvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vsvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfnmad_vvsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let 
TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbd_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vsvvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvsvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvsvvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vvvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vsvvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vsvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvsvmvl : GCCBuiltin<"__builtin_ve_vl_vfnmsbs_vvsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvvvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vvvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vsvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vsvvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vsvvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvsvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvsvvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vvsvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvvvMvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vvvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vsvvMvl : 
GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vsvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvsvMvl : GCCBuiltin<"__builtin_ve_vl_pvfnmsb_vvsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrcpd_vvl : GCCBuiltin<"__builtin_ve_vl_vrcpd_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrcpd_vvvl : GCCBuiltin<"__builtin_ve_vl_vrcpd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrcps_vvl : GCCBuiltin<"__builtin_ve_vl_vrcps_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrcps_vvvl : GCCBuiltin<"__builtin_ve_vl_vrcps_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvrcp_vvl : GCCBuiltin<"__builtin_ve_vl_pvrcp_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvrcp_vvvl : GCCBuiltin<"__builtin_ve_vl_pvrcp_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrtd_vvl : GCCBuiltin<"__builtin_ve_vl_vrsqrtd_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrtd_vvvl : GCCBuiltin<"__builtin_ve_vl_vrsqrtd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrts_vvl : GCCBuiltin<"__builtin_ve_vl_vrsqrts_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrts_vvvl : GCCBuiltin<"__builtin_ve_vl_vrsqrts_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvrsqrt_vvl : GCCBuiltin<"__builtin_ve_vl_pvrsqrt_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvrsqrt_vvvl : GCCBuiltin<"__builtin_ve_vl_pvrsqrt_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrtdnex_vvl : GCCBuiltin<"__builtin_ve_vl_vrsqrtdnex_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrtdnex_vvvl : GCCBuiltin<"__builtin_ve_vl_vrsqrtdnex_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrtsnex_vvl : GCCBuiltin<"__builtin_ve_vl_vrsqrtsnex_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrsqrtsnex_vvvl : GCCBuiltin<"__builtin_ve_vl_vrsqrtsnex_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvrsqrtnex_vvl : GCCBuiltin<"__builtin_ve_vl_pvrsqrtnex_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvrsqrtnex_vvvl : GCCBuiltin<"__builtin_ve_vl_pvrsqrtnex_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsx_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdsx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsx_vvvl : 
GCCBuiltin<"__builtin_ve_vl_vcvtwdsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsx_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdsx_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsxrz_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdsxrz_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsxrz_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdsxrz_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsxrz_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdsxrz_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzx_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdzx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzx_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdzx_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzxrz_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdzxrz_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzxrz_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdzxrz_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzxrz_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwdzxrz_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwssx_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwssx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwssx_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtwssx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwssx_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwssx_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwssxrz_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwssxrz_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwssxrz_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtwssxrz_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwssxrz_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwssxrz_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwszx_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwszx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwszx_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtwszx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwszx_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwszx_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwszxrz_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtwszxrz_vvl">, Intrinsic<[LLVMType], [LLVMType, 
LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwszxrz_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtwszxrz_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtwszxrz_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtwszxrz_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtws_vvl : GCCBuiltin<"__builtin_ve_vl_pvcvtws_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtws_vvvl : GCCBuiltin<"__builtin_ve_vl_pvcvtws_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtws_vvMvl : GCCBuiltin<"__builtin_ve_vl_pvcvtws_vvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtwsrz_vvl : GCCBuiltin<"__builtin_ve_vl_pvcvtwsrz_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtwsrz_vvvl : GCCBuiltin<"__builtin_ve_vl_pvcvtwsrz_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtwsrz_vvMvl : GCCBuiltin<"__builtin_ve_vl_pvcvtwsrz_vvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtld_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtld_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtld_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtld_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtld_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtld_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtldrz_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtldrz_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtldrz_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtldrz_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtldrz_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcvtldrz_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtdw_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtdw_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtdw_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtdw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtsw_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtsw_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtsw_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtsw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtsw_vvl : GCCBuiltin<"__builtin_ve_vl_pvcvtsw_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvcvtsw_vvvl : GCCBuiltin<"__builtin_ve_vl_pvcvtsw_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtdl_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtdl_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; 
+let TargetPrefix = "ve" in def int_ve_vl_vcvtdl_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtdl_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtds_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtds_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtds_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtds_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtsd_vvl : GCCBuiltin<"__builtin_ve_vl_vcvtsd_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcvtsd_vvvl : GCCBuiltin<"__builtin_ve_vl_vcvtsd_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrg_vvvml : GCCBuiltin<"__builtin_ve_vl_vmrg_vvvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrg_vvvmvl : GCCBuiltin<"__builtin_ve_vl_vmrg_vvvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrg_vsvml : GCCBuiltin<"__builtin_ve_vl_vmrg_vsvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrg_vsvmvl : GCCBuiltin<"__builtin_ve_vl_vmrg_vsvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vvvMl : GCCBuiltin<"__builtin_ve_vl_vmrgw_vvvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vvvMvl : GCCBuiltin<"__builtin_ve_vl_vmrgw_vvvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vsvMl : GCCBuiltin<"__builtin_ve_vl_vmrgw_vsvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vsvMvl : GCCBuiltin<"__builtin_ve_vl_vmrgw_vsvMvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vshf_vvvsl : GCCBuiltin<"__builtin_ve_vl_vshf_vvvsl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vshf_vvvsvl : GCCBuiltin<"__builtin_ve_vl_vshf_vvvsvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vcp_vvmvl : GCCBuiltin<"__builtin_ve_vl_vcp_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vex_vvmvl : GCCBuiltin<"__builtin_ve_vl_vex_vvmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklat_ml : GCCBuiltin<"__builtin_ve_vl_vfmklat_ml">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklaf_ml : GCCBuiltin<"__builtin_ve_vl_vfmklaf_ml">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloat_ml : GCCBuiltin<"__builtin_ve_vl_pvfmkwloat_ml">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupat_ml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupat_ml">, Intrinsic<[LLVMType], [LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloaf_ml : GCCBuiltin<"__builtin_ve_vl_pvfmkwloaf_ml">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupaf_ml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupaf_ml">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkat_Ml : GCCBuiltin<"__builtin_ve_vl_pvfmkat_Ml">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkaf_Ml : GCCBuiltin<"__builtin_ve_vl_pvfmkaf_Ml">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklgt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklgt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklgt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklgt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkllt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkllt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkllt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkllt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklne_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklne_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklne_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklne_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkleq_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkleq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkleq_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkleq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklge_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklge_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklle_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklle_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklle_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklle_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklnum_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklnum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklnum_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklnum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklgtnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklgtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklgtnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklgtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], 
[IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklltnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklltnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklltnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklnenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklnenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklnenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklnenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkleqnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkleqnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkleqnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkleqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklgenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmklgenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmklgenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmklgenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkllenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkllenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkllenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkllenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwgt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwgt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwgt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwgt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwlt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwlt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwlt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwlt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwne_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwne_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwne_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwne_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkweq_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkweq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkweq_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkweq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwge_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwge_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwle_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwle_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwle_mvml : 
GCCBuiltin<"__builtin_ve_vl_vfmkwle_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwnum_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwnum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwnum_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwnum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwgtnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwgtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwgtnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwgtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwltnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwltnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwltnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwnenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwnenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwnenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwnenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkweqnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkweqnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkweqnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkweqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwgenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwgenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwgenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwgenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwlenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkwlenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkwlenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkwlenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogt_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlogt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgt_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupgt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlogt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupgt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolt_mvl : 
GCCBuiltin<"__builtin_ve_vl_pvfmkwlolt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplt_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwuplt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlolt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwuplt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlone_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlone_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupne_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupne_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlone_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlone_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupne_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupne_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeq_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwloeq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeq_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupeq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeq_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwloeq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeq_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupeq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloge_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwloge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupge_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloge_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwloge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupge_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlole_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlole_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuple_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwuple_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlole_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlole_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuple_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwuple_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonum_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlonum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnum_mvl : 
GCCBuiltin<"__builtin_ve_vl_pvfmkwupnum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonum_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlonum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnum_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupnum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlonan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlonan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogtnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlogtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgtnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupgtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogtnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlogtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgtnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupgtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloltnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwloltnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupltnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupltnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloltnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwloltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupltnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlonenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupnenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlonenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupnenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeqnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwloeqnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeqnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupeqnan_mvl">, Intrinsic<[LLVMType], 
[LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeqnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwloeqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeqnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupeqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlogenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwupgenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlogenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwupgenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlolenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwuplenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwlolenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkwuplenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgt_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwgt_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgt_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwgt_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlt_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlt_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlt_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlt_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwne_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwne_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwne_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwne_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkweq_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkweq_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkweq_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkweq_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwge_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwge_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwge_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwge_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwle_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwle_Mvl">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwle_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwle_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnum_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwnum_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnum_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwnum_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgtnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwgtnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgtnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwgtnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwltnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwltnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwltnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwltnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnenan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwnenan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnenan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwnenan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkweqnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkweqnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkweqnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkweqnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgenan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwgenan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgenan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwgenan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlenan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlenan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlenan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkwlenan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdgt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdgt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdgt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdgt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdlt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdlt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdlt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdlt_mvml">, Intrinsic<[LLVMType], 
[LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdne_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdne_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdne_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdne_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdeq_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdeq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdeq_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdeq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdge_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdge_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdle_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdle_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdle_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdle_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdnum_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdnum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdnum_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdnum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdgtnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdgtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdgtnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdgtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdltnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdltnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdltnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdnenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdnenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdnenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdnenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdeqnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdeqnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdeqnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdeqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdgenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdgenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in 
def int_ve_vl_vfmkdgenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdgenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdlenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkdlenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkdlenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkdlenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksgt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksgt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksgt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksgt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkslt_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkslt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkslt_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkslt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksne_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksne_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksne_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksne_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkseq_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkseq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkseq_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkseq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksge_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksge_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksle_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksle_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksle_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksle_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksnum_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksnum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksnum_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksnum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksgtnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksgtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksgtnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksgtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksltnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksltnan_mvl">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksltnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksnenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksnenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksnenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksnenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkseqnan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkseqnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkseqnan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkseqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksgenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmksgenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmksgenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmksgenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkslenan_mvl : GCCBuiltin<"__builtin_ve_vl_vfmkslenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfmkslenan_mvml : GCCBuiltin<"__builtin_ve_vl_vfmkslenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogt_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslogt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgt_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupgt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslogt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupgt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolt_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslolt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplt_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksuplt_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslolt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplt_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksuplt_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslone_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslone_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupne_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupne_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslone_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslone_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupne_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupne_mvml">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeq_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksloeq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeq_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupeq_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeq_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksloeq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeq_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupeq_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloge_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksloge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupge_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupge_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloge_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksloge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupge_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupge_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslole_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslole_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksuple_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksuple_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslole_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslole_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksuple_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksuple_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonum_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslonum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnum_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupnum_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonum_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslonum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnum_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupnum_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslonan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslonan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogtnan_mvl : 
GCCBuiltin<"__builtin_ve_vl_pvfmkslogtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgtnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupgtnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogtnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslogtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgtnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupgtnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloltnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksloltnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupltnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupltnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloltnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksloltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupltnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupltnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslonenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupnenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslonenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupnenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeqnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksloeqnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeqnan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupeqnan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeqnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksloeqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeqnan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupeqnan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslogenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksupgenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslogenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksupgenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslolenan_mvl">, 
Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplenan_mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksuplenan_mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmkslolenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplenan_mvml : GCCBuiltin<"__builtin_ve_vl_pvfmksuplenan_mvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksgt_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksgt_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksgt_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksgt_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslt_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslt_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslt_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkslt_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksne_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksne_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksne_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksne_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkseq_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkseq_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkseq_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkseq_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksge_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksge_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksge_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksge_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksle_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksle_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksle_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksle_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksnum_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksnum_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksnum_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksnum_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksgtnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksgtnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksgtnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksgtnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, 
LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksltnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksltnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksltnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksltnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksnenan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksnenan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksnenan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksnenan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkseqnan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkseqnan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkseqnan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkseqnan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksgenan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmksgenan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmksgenan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmksgenan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslenan_Mvl : GCCBuiltin<"__builtin_ve_vl_pvfmkslenan_Mvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pvfmkslenan_MvMl : GCCBuiltin<"__builtin_ve_vl_pvfmkslenan_MvMl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsumwsx_vvl : GCCBuiltin<"__builtin_ve_vl_vsumwsx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsumwsx_vvml : GCCBuiltin<"__builtin_ve_vl_vsumwsx_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsumwzx_vvl : GCCBuiltin<"__builtin_ve_vl_vsumwzx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsumwzx_vvml : GCCBuiltin<"__builtin_ve_vl_vsumwzx_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsuml_vvl : GCCBuiltin<"__builtin_ve_vl_vsuml_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsuml_vvml : GCCBuiltin<"__builtin_ve_vl_vsuml_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsumd_vvl : GCCBuiltin<"__builtin_ve_vl_vfsumd_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsumd_vvml : GCCBuiltin<"__builtin_ve_vl_vfsumd_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsums_vvl : GCCBuiltin<"__builtin_ve_vl_vfsums_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfsums_vvml : GCCBuiltin<"__builtin_ve_vl_vfsums_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxswfstsx_vvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswfstsx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_vrmaxswfstsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswfstsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstsx_vvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswlstsx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswlstsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxswfstzx_vvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswfstzx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxswfstzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswfstzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstzx_vvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswlstzx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrmaxswlstzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswfstsx_vvl : GCCBuiltin<"__builtin_ve_vl_vrminswfstsx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswfstsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrminswfstsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswlstsx_vvl : GCCBuiltin<"__builtin_ve_vl_vrminswlstsx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswlstsx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrminswlstsx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswfstzx_vvl : GCCBuiltin<"__builtin_ve_vl_vrminswfstzx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswfstzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrminswfstzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswlstzx_vvl : GCCBuiltin<"__builtin_ve_vl_vrminswlstzx_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminswlstzx_vvvl : GCCBuiltin<"__builtin_ve_vl_vrminswlstzx_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxslfst_vvl : GCCBuiltin<"__builtin_ve_vl_vrmaxslfst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxslfst_vvvl : GCCBuiltin<"__builtin_ve_vl_vrmaxslfst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxsllst_vvl : GCCBuiltin<"__builtin_ve_vl_vrmaxsllst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrmaxsllst_vvvl : GCCBuiltin<"__builtin_ve_vl_vrmaxsllst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminslfst_vvl : GCCBuiltin<"__builtin_ve_vl_vrminslfst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminslfst_vvvl : GCCBuiltin<"__builtin_ve_vl_vrminslfst_vvvl">, Intrinsic<[LLVMType], [LLVMType, 
LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminsllst_vvl : GCCBuiltin<"__builtin_ve_vl_vrminsllst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrminsllst_vvvl : GCCBuiltin<"__builtin_ve_vl_vrminsllst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdfst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxdfst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdfst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxdfst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdlst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxdlst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdlst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxdlst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxsfst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxsfst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxsfst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxsfst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxslst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxslst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmaxslst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrmaxslst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmindfst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrmindfst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmindfst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrmindfst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmindlst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrmindlst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrmindlst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrmindlst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrminsfst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrminsfst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrminsfst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrminsfst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrminslst_vvl : GCCBuiltin<"__builtin_ve_vl_vfrminslst_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vfrminslst_vvvl : GCCBuiltin<"__builtin_ve_vl_vfrminslst_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrand_vvl : GCCBuiltin<"__builtin_ve_vl_vrand_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrand_vvml : GCCBuiltin<"__builtin_ve_vl_vrand_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vror_vvl : GCCBuiltin<"__builtin_ve_vl_vror_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let 
TargetPrefix = "ve" in def int_ve_vl_vror_vvml : GCCBuiltin<"__builtin_ve_vl_vror_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrxor_vvl : GCCBuiltin<"__builtin_ve_vl_vrxor_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vrxor_vvml : GCCBuiltin<"__builtin_ve_vl_vrxor_vvml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssl : GCCBuiltin<"__builtin_ve_vl_vgt_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgt_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssml : GCCBuiltin<"__builtin_ve_vl_vgt_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgt_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtnc_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtu_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtunc_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def 
int_ve_vl_vgtlsx_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlsx_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlzx_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssvl : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssml">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssmvl : GCCBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssmvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrReadMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsc_vvssl : GCCBuiltin<"__builtin_ve_vl_vsc_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsc_vvssml : GCCBuiltin<"__builtin_ve_vl_vsc_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vscnc_vvssl">, 
Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vscnc_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscot_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscot_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscncot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscncot_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscncot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscncot_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscu_vvssl : GCCBuiltin<"__builtin_ve_vl_vscu_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscu_vvssml : GCCBuiltin<"__builtin_ve_vl_vscu_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscunc_vvssl : GCCBuiltin<"__builtin_ve_vl_vscunc_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscunc_vvssml : GCCBuiltin<"__builtin_ve_vl_vscunc_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscuot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscuot_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscuot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscuot_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscuncot_vvssl : GCCBuiltin<"__builtin_ve_vl_vscuncot_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscuncot_vvssml : GCCBuiltin<"__builtin_ve_vl_vscuncot_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscl_vvssl : GCCBuiltin<"__builtin_ve_vl_vscl_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vscl_vvssml : GCCBuiltin<"__builtin_ve_vl_vscl_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsclnc_vvssl : GCCBuiltin<"__builtin_ve_vl_vsclnc_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsclnc_vvssml : GCCBuiltin<"__builtin_ve_vl_vsclnc_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssl : GCCBuiltin<"__builtin_ve_vl_vsclot_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssml : 
GCCBuiltin<"__builtin_ve_vl_vsclot_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssl : GCCBuiltin<"__builtin_ve_vl_vsclncot_vvssl">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssml : GCCBuiltin<"__builtin_ve_vl_vsclncot_vvssml">, Intrinsic<[], [LLVMType, LLVMType, LLVMType, LLVMType, LLVMType, LLVMType], [IntrWriteMem]>; +let TargetPrefix = "ve" in def int_ve_vl_andm_mmm : GCCBuiltin<"__builtin_ve_vl_andm_mmm">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_andm_MMM : GCCBuiltin<"__builtin_ve_vl_andm_MMM">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_orm_mmm : GCCBuiltin<"__builtin_ve_vl_orm_mmm">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_orm_MMM : GCCBuiltin<"__builtin_ve_vl_orm_MMM">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_xorm_mmm : GCCBuiltin<"__builtin_ve_vl_xorm_mmm">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_xorm_MMM : GCCBuiltin<"__builtin_ve_vl_xorm_MMM">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_eqvm_mmm : GCCBuiltin<"__builtin_ve_vl_eqvm_mmm">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_eqvm_MMM : GCCBuiltin<"__builtin_ve_vl_eqvm_MMM">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_nndm_mmm : GCCBuiltin<"__builtin_ve_vl_nndm_mmm">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_nndm_MMM : GCCBuiltin<"__builtin_ve_vl_nndm_MMM">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_negm_mm : GCCBuiltin<"__builtin_ve_vl_negm_mm">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_negm_MM : GCCBuiltin<"__builtin_ve_vl_negm_MM">, Intrinsic<[LLVMType], [LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_pcvm_sml : GCCBuiltin<"__builtin_ve_vl_pcvm_sml">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_lzvm_sml : GCCBuiltin<"__builtin_ve_vl_lzvm_sml">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_tovm_sml : GCCBuiltin<"__builtin_ve_vl_tovm_sml">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vec_expf_vvl : GCCBuiltin<"__builtin_ve_vl_vec_expf_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vec_expf_vvvl : GCCBuiltin<"__builtin_ve_vl_vec_expf_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vec_exp_vvl : GCCBuiltin<"__builtin_ve_vl_vec_exp_vvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType], [IntrNoMem]>; +let TargetPrefix = "ve" in def int_ve_vl_vec_exp_vvvl : GCCBuiltin<"__builtin_ve_vl_vec_exp_vvvl">, Intrinsic<[LLVMType], [LLVMType, LLVMType, LLVMType], [IntrNoMem]>; diff --git a/llvm/include/llvm/Support/MachineValueType.h b/llvm/include/llvm/Support/MachineValueType.h --- 
a/llvm/include/llvm/Support/MachineValueType.h +++ b/llvm/include/llvm/Support/MachineValueType.h @@ -110,93 +110,101 @@ v8i64 = 60, // 8 x i64 v16i64 = 61, // 16 x i64 v32i64 = 62, // 32 x i64 + v64i64 = 63, // 64 x i64 + v128i64 = 64, // 128 x i64 + v256i64 = 65, // 256 x i64 - v1i128 = 63, // 1 x i128 + v1i128 = 66, // 1 x i128 FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i1, LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i128, - v2f16 = 64, // 2 x f16 - v3f16 = 65, // 3 x f16 - v4f16 = 66, // 4 x f16 - v8f16 = 67, // 8 x f16 - v16f16 = 68, // 16 x f16 - v32f16 = 69, // 32 x f16 - v1f32 = 70, // 1 x f32 - v2f32 = 71, // 2 x f32 - v3f32 = 72, // 3 x f32 - v4f32 = 73, // 4 x f32 - v5f32 = 74, // 5 x f32 - v8f32 = 75, // 8 x f32 - v16f32 = 76, // 16 x f32 - v32f32 = 77, // 32 x f32 - v64f32 = 78, // 64 x f32 - v128f32 = 79, // 128 x f32 - v256f32 = 80, // 256 x f32 - v512f32 = 81, // 512 x f32 - v1024f32 = 82, // 1024 x f32 - v2048f32 = 83, // 2048 x f32 - v1f64 = 84, // 1 x f64 - v2f64 = 85, // 2 x f64 - v4f64 = 86, // 4 x f64 - v8f64 = 87, // 8 x f64 + v2f16 = 67, // 2 x f16 + v3f16 = 68, // 3 x f16 + v4f16 = 69, // 4 x f16 + v8f16 = 70, // 8 x f16 + v16f16 = 71, // 16 x f16 + v32f16 = 72, // 32 x f16 + v1f32 = 73, // 1 x f32 + v2f32 = 74, // 2 x f32 + v3f32 = 75, // 3 x f32 + v4f32 = 76, // 4 x f32 + v5f32 = 77, // 5 x f32 + v8f32 = 78, // 8 x f32 + v16f32 = 79, // 16 x f32 + v32f32 = 80, // 32 x f32 + v64f32 = 81, // 64 x f32 + v128f32 = 82, // 128 x f32 + v256f32 = 83, // 256 x f32 + v512f32 = 84, // 512 x f32 + v1024f32 = 85, // 1024 x f32 + v2048f32 = 86, // 2048 x f32 + v1f64 = 87, // 1 x f64 + v2f64 = 88, // 2 x f64 + v4f64 = 89, // 4 x f64 + v8f64 = 90, // 8 x f64 + v16f64 = 91, // 16 x f64 + v32f64 = 92, // 32 x f64 + v64f64 = 93, // 64 x f64 + v128f64 = 94, // 128 x f64 + v256f64 = 95, // 256 x f64 FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE = v2f16, - LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v8f64, + LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v256f64, FIRST_FIXEDLEN_VECTOR_VALUETYPE = v1i1, - LAST_FIXEDLEN_VECTOR_VALUETYPE = v8f64, - - nxv1i1 = 88, // n x 1 x i1 - nxv2i1 = 89, // n x 2 x i1 - nxv4i1 = 90, // n x 4 x i1 - nxv8i1 = 91, // n x 8 x i1 - nxv16i1 = 92, // n x 16 x i1 - nxv32i1 = 93, // n x 32 x i1 - - nxv1i8 = 94, // n x 1 x i8 - nxv2i8 = 95, // n x 2 x i8 - nxv4i8 = 96, // n x 4 x i8 - nxv8i8 = 97, // n x 8 x i8 - nxv16i8 = 98, // n x 16 x i8 - nxv32i8 = 99, // n x 32 x i8 - - nxv1i16 = 100, // n x 1 x i16 - nxv2i16 = 101, // n x 2 x i16 - nxv4i16 = 102, // n x 4 x i16 - nxv8i16 = 103, // n x 8 x i16 - nxv16i16 = 104, // n x 16 x i16 - nxv32i16 = 105, // n x 32 x i16 - - nxv1i32 = 106, // n x 1 x i32 - nxv2i32 = 107, // n x 2 x i32 - nxv4i32 = 108, // n x 4 x i32 - nxv8i32 = 109, // n x 8 x i32 - nxv16i32 = 110, // n x 16 x i32 - nxv32i32 = 111, // n x 32 x i32 - - nxv1i64 = 112, // n x 1 x i64 - nxv2i64 = 113, // n x 2 x i64 - nxv4i64 = 114, // n x 4 x i64 - nxv8i64 = 115, // n x 8 x i64 - nxv16i64 = 116, // n x 16 x i64 - nxv32i64 = 117, // n x 32 x i64 + LAST_FIXEDLEN_VECTOR_VALUETYPE = v256f64, + + nxv1i1 = 96, // n x 1 x i1 + nxv2i1 = 97, // n x 2 x i1 + nxv4i1 = 98, // n x 4 x i1 + nxv8i1 = 99, // n x 8 x i1 + nxv16i1 = 100, // n x 16 x i1 + nxv32i1 = 101, // n x 32 x i1 + + nxv1i8 = 102, // n x 1 x i8 + nxv2i8 = 103, // n x 2 x i8 + nxv4i8 = 104, // n x 4 x i8 + nxv8i8 = 105, // n x 8 x i8 + nxv16i8 = 106, // n x 16 x i8 + nxv32i8 = 107, // n x 32 x i8 + + nxv1i16 = 108, // n x 1 x i16 + nxv2i16 = 109, // n x 2 x i16 + nxv4i16 = 110, // n x 4 x i16 + nxv8i16 = 111, // n x 8 x i16 + 
nxv16i16 = 112, // n x 16 x i16 + nxv32i16 = 113, // n x 32 x i16 + + nxv1i32 = 114, // n x 1 x i32 + nxv2i32 = 115, // n x 2 x i32 + nxv4i32 = 116, // n x 4 x i32 + nxv8i32 = 117, // n x 8 x i32 + nxv16i32 = 118, // n x 16 x i32 + nxv32i32 = 119, // n x 32 x i32 + + nxv1i64 = 120, // n x 1 x i64 + nxv2i64 = 121, // n x 2 x i64 + nxv4i64 = 122, // n x 4 x i64 + nxv8i64 = 123, // n x 8 x i64 + nxv16i64 = 124, // n x 16 x i64 + nxv32i64 = 125, // n x 32 x i64 FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv1i1, LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv32i64, - nxv2f16 = 118, // n x 2 x f16 - nxv4f16 = 119, // n x 4 x f16 - nxv8f16 = 120, // n x 8 x f16 - nxv1f32 = 121, // n x 1 x f32 - nxv2f32 = 122, // n x 2 x f32 - nxv4f32 = 123, // n x 4 x f32 - nxv8f32 = 124, // n x 8 x f32 - nxv16f32 = 125, // n x 16 x f32 - nxv1f64 = 126, // n x 1 x f64 - nxv2f64 = 127, // n x 2 x f64 - nxv4f64 = 128, // n x 4 x f64 - nxv8f64 = 129, // n x 8 x f64 + nxv2f16 = 126, // n x 2 x f16 + nxv4f16 = 127, // n x 4 x f16 + nxv8f16 = 128, // n x 8 x f16 + nxv1f32 = 129, // n x 1 x f32 + nxv2f32 = 130, // n x 2 x f32 + nxv4f32 = 131, // n x 4 x f32 + nxv8f32 = 132, // n x 8 x f32 + nxv16f32 = 133, // n x 16 x f32 + nxv1f64 = 134, // n x 1 x f64 + nxv2f64 = 135, // n x 2 x f64 + nxv4f64 = 136, // n x 4 x f64 + nxv8f64 = 137, // n x 8 x f64 FIRST_FP_SCALABLE_VECTOR_VALUETYPE = nxv2f16, LAST_FP_SCALABLE_VECTOR_VALUETYPE = nxv8f64, @@ -207,20 +215,20 @@ FIRST_VECTOR_VALUETYPE = v1i1, LAST_VECTOR_VALUETYPE = nxv8f64, - x86mmx = 130, // This is an X86 MMX value + x86mmx = 138, // This is an X86 MMX value - Glue = 131, // This glues nodes together during pre-RA sched + Glue = 139, // This glues nodes together during pre-RA sched - isVoid = 132, // This has no value + isVoid = 140, // This has no value - Untyped = 133, // This value takes a register, but has + Untyped = 141, // This value takes a register, but has // unspecified type. The register class // will be determined by the opcode. - exnref = 134, // WebAssembly's exnref type + exnref = 142, // WebAssembly's exnref type FIRST_VALUETYPE = 1, // This is always the beginning of the list. - LAST_VALUETYPE = 135, // This always remains at the end of the list. + LAST_VALUETYPE = 143, // This always remains at the end of the list. // This is the current maximum for LAST_VALUETYPE. // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors @@ -372,17 +380,37 @@ /// Return true if this is a 1024-bit vector type. bool is1024BitVector() const { - return (SimpleTy == MVT::v1024i1 || SimpleTy == MVT::v128i8 || + return (SimpleTy == MVT::v32f32 || SimpleTy == MVT::v16f64 || + SimpleTy == MVT::v1024i1 || SimpleTy == MVT::v128i8 || SimpleTy == MVT::v64i16 || SimpleTy == MVT::v32i32 || SimpleTy == MVT::v16i64); } /// Return true if this is a 2048-bit vector type. bool is2048BitVector() const { - return (SimpleTy == MVT::v256i8 || SimpleTy == MVT::v128i16 || + return (SimpleTy == MVT::v64f32 || SimpleTy == MVT::v32f64 || + SimpleTy == MVT::v256i8 || SimpleTy == MVT::v128i16 || SimpleTy == MVT::v64i32 || SimpleTy == MVT::v32i64); } + /// Return true if this is a 4096-bit vector type. + bool is4096BitVector() const { + return (SimpleTy == MVT::v128f32 || SimpleTy == MVT::v64f64 || + SimpleTy == MVT::v128i32 || SimpleTy == MVT::v64i64); + } + + /// Return true if this is a 8192-bit vector type. 
+ bool is8192BitVector() const { + return (SimpleTy == MVT::v256f32 || SimpleTy == MVT::v128f64 || + SimpleTy == MVT::v256i32 || SimpleTy == MVT::v128i64); + } + + /// Return true if this is a 16384-bit vector type. + bool is16384BitVector() const { + return (SimpleTy == MVT::v256i64 || SimpleTy == MVT::v512i32 || + SimpleTy == MVT::v256f64 || SimpleTy == MVT::v512f32); + } + /// Return true if this is an overloaded type for TableGen. bool isOverloaded() const { return (SimpleTy==MVT::Any || @@ -498,6 +526,9 @@ case v8i64: case v16i64: case v32i64: + case v64i64: + case v128i64: + case v256i64: case nxv1i64: case nxv2i64: case nxv4i64: @@ -537,6 +568,11 @@ case v2f64: case v4f64: case v8f64: + case v16f64: + case v32f64: + case v64f64: + case v128f64: + case v256f64: case nxv1f64: case nxv2f64: case nxv4f64: @@ -559,17 +595,23 @@ case v256i1: case v256i8: case v256i32: - case v256f32: return 256; + case v256f32: + case v256i64: + case v256f64: return 256; case v128i1: case v128i8: case v128i16: case v128i32: - case v128f32: return 128; + case v128i64: + case v128f32: + case v128f64: return 128; case v64i1: case v64i8: case v64i16: case v64i32: - case v64f32: return 64; + case v64f32: + case v64i64: + case v64f64: return 64; case v32i1: case v32i8: case v32i16: @@ -577,6 +619,7 @@ case v32i64: case v32f16: case v32f32: + case v32f64: case nxv32i1: case nxv32i8: case nxv32i16: @@ -589,6 +632,7 @@ case v16i64: case v16f16: case v16f32: + case v16f64: case nxv16i1: case nxv16i8: case nxv16i16: @@ -805,6 +849,7 @@ case v64i16: case v32i32: case v16i64: + case v16f64: case v32f32: return TypeSize::Fixed(1024); case nxv32i32: case nxv16i64: return TypeSize::Scalable(1024); @@ -812,12 +857,19 @@ case v128i16: case v64i32: case v32i64: + case v32f64: case v64f32: return TypeSize::Fixed(2048); case nxv32i64: return TypeSize::Scalable(2048); + case v64i64: + case v64f64: case v128i32: case v128f32: return TypeSize::Fixed(4096); + case v128i64: + case v128f64: case v256i32: case v256f32: return TypeSize::Fixed(8192); + case v256i64: + case v256f64: case v512i32: case v512f32: return TypeSize::Fixed(16384); case v1024i32: @@ -977,6 +1029,9 @@ if (NumElements == 8) return MVT::v8i64; if (NumElements == 16) return MVT::v16i64; if (NumElements == 32) return MVT::v32i64; + if (NumElements == 64) return MVT::v64i64; + if (NumElements == 128) return MVT::v128i64; + if (NumElements == 256) return MVT::v256i64; break; case MVT::i128: if (NumElements == 1) return MVT::v1i128; @@ -1010,6 +1065,11 @@ if (NumElements == 2) return MVT::v2f64; if (NumElements == 4) return MVT::v4f64; if (NumElements == 8) return MVT::v8f64; + if (NumElements == 16) return MVT::v16f64; + if (NumElements == 32) return MVT::v32f64; + if (NumElements == 64) return MVT::v64f64; + if (NumElements == 128) return MVT::v128f64; + if (NumElements == 256) return MVT::v256f64; break; } return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE); diff --git a/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/llvm/lib/CodeGen/SjLjEHPrepare.cpp --- a/llvm/lib/CodeGen/SjLjEHPrepare.cpp +++ b/llvm/lib/CodeGen/SjLjEHPrepare.cpp @@ -50,10 +50,12 @@ Function *CallSiteFn; Function *FuncCtxFn; AllocaInst *FuncCtx; + bool Use64BitsData; public: static char ID; // Pass identification, replacement for typeid - explicit SjLjEHPrepare() : FunctionPass(ID) {} + explicit SjLjEHPrepare(bool use64 = false) + : FunctionPass(ID), Use64BitsData(use64) {} bool doInitialization(Module &M) override; bool runOnFunction(Function &F) override; @@ -77,18 +79,22 @@ false, false) // Public
Interface To the SjLjEHPrepare pass. -FunctionPass *llvm::createSjLjEHPreparePass() { return new SjLjEHPrepare(); } +FunctionPass *llvm::createSjLjEHPreparePass(bool Use64BitsData) { + return new SjLjEHPrepare(Use64BitsData); +} + // doInitialization - Set up decalarations and types needed to process // exceptions. bool SjLjEHPrepare::doInitialization(Module &M) { // Build the function context structure. // builtin_setjmp uses a five word jbuf Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext()); - Type *Int32Ty = Type::getInt32Ty(M.getContext()); - doubleUnderDataTy = ArrayType::get(Int32Ty, 4); + Type *DataTy = Use64BitsData ? + Type::getInt64Ty(M.getContext()) : Type::getInt32Ty(M.getContext()); + doubleUnderDataTy = ArrayType::get(DataTy, 4); doubleUnderJBufTy = ArrayType::get(VoidPtrTy, 5); FunctionContextTy = StructType::get(VoidPtrTy, // __prev - Int32Ty, // call_site + DataTy, // call_site doubleUnderDataTy, // __data VoidPtrTy, // __personality VoidPtrTy, // __lsda @@ -112,8 +118,10 @@ Builder.CreateGEP(FunctionContextTy, FuncCtx, Idxs, "call_site"); // Insert a store of the call-site number + IntegerType *DataTy = Use64BitsData ? + Type::getInt64Ty(I->getContext()) : Type::getInt32Ty(I->getContext()); ConstantInt *CallSiteNoC = - ConstantInt::get(Type::getInt32Ty(I->getContext()), Number); + ConstantInt::get(DataTy, Number); Builder.CreateStore(CallSiteNoC, CallSite, true /*volatile*/); } @@ -190,16 +198,21 @@ Builder.CreateConstGEP2_32(FunctionContextTy, FuncCtx, 0, 2, "__data"); // The exception values come back in context->__data[0]. - Type *Int32Ty = Type::getInt32Ty(F.getContext()); + IntegerType *DataTy = Use64BitsData ? + Type::getInt64Ty(F.getContext()) : Type::getInt32Ty(F.getContext()); Value *ExceptionAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData, 0, 0, "exception_gep"); - Value *ExnVal = Builder.CreateLoad(Int32Ty, ExceptionAddr, true, "exn_val"); + Value *ExnVal = Builder.CreateLoad(DataTy, ExceptionAddr, true, "exn_val"); ExnVal = Builder.CreateIntToPtr(ExnVal, Builder.getInt8PtrTy()); Value *SelectorAddr = Builder.CreateConstGEP2_32(doubleUnderDataTy, FCData, 0, 1, "exn_selector_gep"); Value *SelVal = - Builder.CreateLoad(Int32Ty, SelectorAddr, true, "exn_selector_val"); + Builder.CreateLoad(DataTy, SelectorAddr, true, "exn_selector_val"); + + // SelVal must be Int32Ty, so trunc it + if (Use64BitsData) + SelVal = Builder.CreateTrunc(SelVal, Type::getInt32Ty(F.getContext())); substituteLPadValues(LPI, ExnVal, SelVal); } diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp --- a/llvm/lib/CodeGen/TargetPassConfig.cpp +++ b/llvm/lib/CodeGen/TargetPassConfig.cpp @@ -695,7 +695,10 @@ // removed from the parent invoke(s). This could happen when a landing // pad is shared by multiple invokes and is also a target of a normal // edge from elsewhere. 
- addPass(createSjLjEHPreparePass()); + if (TM->getTargetTriple().getArch() == Triple::ve) + addPass(createSjLjEHPreparePass(true)); + else + addPass(createSjLjEHPreparePass()); LLVM_FALLTHROUGH; case ExceptionHandling::DwarfCFI: case ExceptionHandling::ARM: diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp --- a/llvm/lib/CodeGen/ValueTypes.cpp +++ b/llvm/lib/CodeGen/ValueTypes.cpp @@ -92,6 +92,18 @@ return isExtendedVector() && getExtendedSizeInBits() == 2048; } +bool EVT::isExtended4096BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 4096; +} + +bool EVT::isExtended8192BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 8192; +} + +bool EVT::isExtended16384BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 16384; +} + EVT EVT::getExtendedVectorElementType() const { assert(isExtended() && "Type is not extended!"); return EVT::getEVT(cast(LLVMTy)->getElementType()); @@ -205,6 +217,9 @@ case MVT::v8i64: return VectorType::get(Type::getInt64Ty(Context), 8); case MVT::v16i64: return VectorType::get(Type::getInt64Ty(Context), 16); case MVT::v32i64: return VectorType::get(Type::getInt64Ty(Context), 32); + case MVT::v64i64: return VectorType::get(Type::getInt64Ty(Context), 64); + case MVT::v128i64: return VectorType::get(Type::getInt64Ty(Context), 128); + case MVT::v256i64: return VectorType::get(Type::getInt64Ty(Context), 256); case MVT::v1i128: return VectorType::get(Type::getInt128Ty(Context), 1); case MVT::v2f16: return VectorType::get(Type::getHalfTy(Context), 2); case MVT::v3f16: return VectorType::get(Type::getHalfTy(Context), 3); @@ -230,6 +245,11 @@ case MVT::v2f64: return VectorType::get(Type::getDoubleTy(Context), 2); case MVT::v4f64: return VectorType::get(Type::getDoubleTy(Context), 4); case MVT::v8f64: return VectorType::get(Type::getDoubleTy(Context), 8); + case MVT::v16f64: return VectorType::get(Type::getDoubleTy(Context), 16); + case MVT::v32f64: return VectorType::get(Type::getDoubleTy(Context), 32); + case MVT::v64f64: return VectorType::get(Type::getDoubleTy(Context), 64); + case MVT::v128f64: return VectorType::get(Type::getDoubleTy(Context), 128); + case MVT::v256f64: return VectorType::get(Type::getDoubleTy(Context), 256); case MVT::nxv1i1: return VectorType::get(Type::getInt1Ty(Context), 1, /*Scalable=*/ true); case MVT::nxv2i1: diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp --- a/llvm/lib/IR/Function.cpp +++ b/llvm/lib/IR/Function.cpp @@ -697,17 +697,18 @@ IIT_PTR_TO_ELT = 33, IIT_VEC_OF_ANYPTRS_TO_ELT = 34, IIT_I128 = 35, - IIT_V512 = 36, - IIT_V1024 = 37, - IIT_STRUCT6 = 38, - IIT_STRUCT7 = 39, - IIT_STRUCT8 = 40, - IIT_F128 = 41, - IIT_VEC_ELEMENT = 42, - IIT_SCALABLE_VEC = 43, - IIT_SUBDIVIDE2_ARG = 44, - IIT_SUBDIVIDE4_ARG = 45, - IIT_VEC_OF_BITCASTS_TO_INT = 46 + IIT_V256 = 36, + IIT_V512 = 37, + IIT_V1024 = 38, + IIT_STRUCT6 = 39, + IIT_STRUCT7 = 40, + IIT_STRUCT8 = 41, + IIT_F128 = 42, + IIT_VEC_ELEMENT = 43, + IIT_SCALABLE_VEC = 44, + IIT_SUBDIVIDE2_ARG = 45, + IIT_SUBDIVIDE4_ARG = 46, + IIT_VEC_OF_BITCASTS_TO_INT = 47 }; static void DecodeIITType(unsigned &NextElt, ArrayRef Infos, @@ -791,6 +792,10 @@ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 64)); DecodeIITType(NextElt, Infos, OutputTable); return; + case IIT_V256: + OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 256)); + DecodeIITType(NextElt, Infos, OutputTable); + return; case IIT_V512: 
OutputTable.push_back(IITDescriptor::get(IITDescriptor::Vector, 512)); DecodeIITType(NextElt, Infos, OutputTable); diff --git a/llvm/lib/Support/Triple.cpp b/llvm/lib/Support/Triple.cpp --- a/llvm/lib/Support/Triple.cpp +++ b/llvm/lib/Support/Triple.cpp @@ -70,6 +70,7 @@ case wasm64: return "wasm64"; case renderscript32: return "renderscript32"; case renderscript64: return "renderscript64"; + case ve: return "ve"; } llvm_unreachable("Invalid ArchType!"); @@ -144,6 +145,8 @@ case riscv32: case riscv64: return "riscv"; + + case ve: return "ve"; } } @@ -315,6 +318,7 @@ .Case("wasm64", wasm64) .Case("renderscript32", renderscript32) .Case("renderscript64", renderscript64) + .Case("ve", ve) .Default(UnknownArch); } @@ -443,6 +447,7 @@ .Case("wasm64", Triple::wasm64) .Case("renderscript32", Triple::renderscript32) .Case("renderscript64", Triple::renderscript64) + .Case("ve", Triple::ve) .Default(Triple::UnknownArch); // Some architectures require special parsing logic just to compute the @@ -701,6 +706,7 @@ case Triple::tcele: case Triple::thumbeb: case Triple::xcore: + case Triple::ve: return Triple::ELF; case Triple::ppc: @@ -1284,6 +1290,7 @@ case llvm::Triple::spir64: case llvm::Triple::wasm64: case llvm::Triple::renderscript64: + case llvm::Triple::ve: return 64; } llvm_unreachable("Invalid architecture value"); @@ -1312,6 +1319,7 @@ case Triple::msp430: case Triple::systemz: case Triple::ppc64le: + case Triple::ve: T.setArch(UnknownArch); break; @@ -1404,6 +1412,7 @@ case Triple::x86_64: case Triple::wasm64: case Triple::renderscript64: + case Triple::ve: // Already 64-bit. break; @@ -1462,6 +1471,7 @@ case Triple::xcore: case Triple::renderscript32: case Triple::renderscript64: + case Triple::ve: // ARM is intentionally unsupported here, changing the architecture would // drop any arch suffixes. @@ -1553,6 +1563,7 @@ case Triple::tcele: case Triple::renderscript32: case Triple::renderscript64: + case Triple::ve: return true; default: return false; diff --git a/llvm/lib/Target/LLVMBuild.txt b/llvm/lib/Target/LLVMBuild.txt --- a/llvm/lib/Target/LLVMBuild.txt +++ b/llvm/lib/Target/LLVMBuild.txt @@ -36,6 +36,7 @@ WebAssembly X86 XCore + VE ; This is a special group whose required libraries are extended (by llvm-build) ; with the best execution engine (the native JIT, if available, or the diff --git a/llvm/lib/Target/VE/AsmParser/CMakeLists.txt b/llvm/lib/Target/VE/AsmParser/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/AsmParser/CMakeLists.txt @@ -0,0 +1,3 @@ +add_llvm_component_library(LLVMVEAsmParser + VEAsmParser.cpp + ) diff --git a/llvm/lib/Target/VE/AsmParser/LLVMBuild.txt b/llvm/lib/Target/VE/AsmParser/LLVMBuild.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/AsmParser/LLVMBuild.txt @@ -0,0 +1,22 @@ +;===- ./lib/Target/VE/AsmParser/LLVMBuild.txt ------------------*- Conf -*--===; +; +; Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +; See https://llvm.org/LICENSE.txt for license information. +; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. 
+; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = VEAsmParser +parent = VE +required_libraries = MC MCParser VEDesc VEInfo Support +add_to_library_groups = VE diff --git a/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp b/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp @@ -0,0 +1,832 @@ +//===-- VEAsmParser.cpp - Parse VE assembly to MCInst instructions --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/VEMCExpr.h" +#include "MCTargetDesc/VEMCTargetDesc.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Triple.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCObjectFileInfo.h" +#include "llvm/MC/MCParser/MCAsmLexer.h" +#include "llvm/MC/MCParser/MCAsmParser.h" +#include "llvm/MC/MCParser/MCParsedAsmOperand.h" +#include "llvm/MC/MCParser/MCTargetAsmParser.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/SMLoc.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" +#include +#include +#include +#include + +using namespace llvm; + +// The generated AsmMatcher VEGenAsmMatcher uses "VE" as the target +// namespace. But SPARC backend uses "SP" as its namespace. +namespace llvm { +namespace VE { + +using namespace VE; + +} // namespace VE +} // namespace llvm + +namespace { + +class VEOperand; + +class VEAsmParser : public MCTargetAsmParser { + MCAsmParser &Parser; + + /// @name Auto-generated Match Functions + /// { + +#define GET_ASSEMBLER_HEADER +#include "VEGenAsmMatcher.inc" + + /// } + + // public interface of the MCTargetAsmParser. + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, + OperandVector &Operands, MCStreamer &Out, + uint64_t &ErrorInfo, + bool MatchingInlineAsm) override; + bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; + bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) override; + bool ParseDirective(AsmToken DirectiveID) override; + + unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, + unsigned Kind) override; + + // Custom parse functions for VE specific operands. + OperandMatchResultTy parseMEMOperand(OperandVector &Operands); + + OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Name); + + OperandMatchResultTy parseVEAsmOperand(std::unique_ptr &Operand, + bool isCall = false); + + OperandMatchResultTy parseBranchModifiers(OperandVector &Operands); + + // Helper function for dealing with %lo / %hi in PIC mode. + const VEMCExpr *adjustPICRelocation(VEMCExpr::VariantKind VK, + const MCExpr *subExpr); + + // returns true if Tok is matched to a register and returns register in RegNo. 
+ bool matchRegisterName(const AsmToken &Tok, unsigned &RegNo, + unsigned &RegKind); + + bool matchVEAsmModifiers(const MCExpr *&EVal, SMLoc &EndLoc); + bool parseDirectiveWord(unsigned Size, SMLoc L); + + bool is64Bit() const { + return getSTI().getTargetTriple().getArch() == Triple::sparcv9; + } + +public: + VEAsmParser(const MCSubtargetInfo &sti, MCAsmParser &parser, + const MCInstrInfo &MII, const MCTargetOptions &Options) + : MCTargetAsmParser(Options, sti, MII), Parser(parser) { + // Initialize the set of available features. + setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits())); + } +}; + +} // end anonymous namespace + +static const MCPhysReg IntRegs[64] = { + VE::SX0, VE::SX1, VE::SX2, VE::SX3, VE::SX4, VE::SX5, VE::SX6, + VE::SX7, VE::SX8, VE::SX9, VE::SX10, VE::SX11, VE::SX12, VE::SX13, + VE::SX14, VE::SX15, VE::SX16, VE::SX17, VE::SX18, VE::SX19, VE::SX20, + VE::SX21, VE::SX22, VE::SX23, VE::SX24, VE::SX25, VE::SX26, VE::SX27, + VE::SX28, VE::SX29, VE::SX30, VE::SX31, VE::SX32, VE::SX33, VE::SX34, + VE::SX35, VE::SX36, VE::SX37, VE::SX38, VE::SX39, VE::SX40, VE::SX41, + VE::SX42, VE::SX43, VE::SX44, VE::SX45, VE::SX46, VE::SX47, VE::SX48, + VE::SX49, VE::SX50, VE::SX51, VE::SX52, VE::SX53, VE::SX54, VE::SX55, + VE::SX56, VE::SX57, VE::SX58, VE::SX59, VE::SX60, VE::SX61, VE::SX62, + VE::SX63}; + +namespace { + +/// VEOperand - Instances of this class represent a parsed VE machine +/// instruction. +class VEOperand : public MCParsedAsmOperand { +public: + enum RegisterKind { + rk_None, + rk_IntReg, + rk_IntPairReg, + rk_FloatReg, + rk_DoubleReg, + rk_QuadReg, + rk_CoprocReg, + rk_CoprocPairReg, + rk_Special, + }; + +private: + enum KindTy { + k_Token, + k_Register, + k_Immediate, + k_MemoryReg, + k_MemoryImm + } Kind; + + SMLoc StartLoc, EndLoc; + + struct Token { + const char *Data; + unsigned Length; + }; + + struct RegOp { + unsigned RegNum; + RegisterKind Kind; + }; + + struct ImmOp { + const MCExpr *Val; + }; + + struct MemOp { + unsigned Base; + unsigned OffsetReg; + const MCExpr *Off; + }; + + union { + struct Token Tok; + struct RegOp Reg; + struct ImmOp Imm; + struct MemOp Mem; + }; + +public: + VEOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} + + bool isToken() const override { return Kind == k_Token; } + bool isReg() const override { return Kind == k_Register; } + bool isImm() const override { return Kind == k_Immediate; } + bool isMem() const override { return isMEMrr() || isMEMri(); } + bool isMEMrr() const { return Kind == k_MemoryReg; } + bool isMEMri() const { return Kind == k_MemoryImm; } + + bool isIntReg() const { + return (Kind == k_Register && Reg.Kind == rk_IntReg); + } + + bool isFloatReg() const { + return (Kind == k_Register && Reg.Kind == rk_FloatReg); + } + + bool isFloatOrDoubleReg() const { + return (Kind == k_Register && + (Reg.Kind == rk_FloatReg || Reg.Kind == rk_DoubleReg)); + } + + bool isCoprocReg() const { + return (Kind == k_Register && Reg.Kind == rk_CoprocReg); + } + + StringRef getToken() const { + assert(Kind == k_Token && "Invalid access!"); + return StringRef(Tok.Data, Tok.Length); + } + + unsigned getReg() const override { + assert((Kind == k_Register) && "Invalid access!"); + return Reg.RegNum; + } + + const MCExpr *getImm() const { + assert((Kind == k_Immediate) && "Invalid access!"); + return Imm.Val; + } + + unsigned getMemBase() const { + assert((Kind == k_MemoryReg || Kind == k_MemoryImm) && "Invalid access!"); + return Mem.Base; + } + + unsigned getMemOffsetReg() const { + assert((Kind == k_MemoryReg) 
&& "Invalid access!"); + return Mem.OffsetReg; + } + + const MCExpr *getMemOff() const { + assert((Kind == k_MemoryImm) && "Invalid access!"); + return Mem.Off; + } + + /// getStartLoc - Get the location of the first token of this operand. + SMLoc getStartLoc() const override { return StartLoc; } + /// getEndLoc - Get the location of the last token of this operand. + SMLoc getEndLoc() const override { return EndLoc; } + + void print(raw_ostream &OS) const override { + switch (Kind) { + case k_Token: + OS << "Token: " << getToken() << "\n"; + break; + case k_Register: + OS << "Reg: #" << getReg() << "\n"; + break; + case k_Immediate: + OS << "Imm: " << getImm() << "\n"; + break; + case k_MemoryReg: + OS << "Mem: " << getMemBase() << "+" << getMemOffsetReg() << "\n"; + break; + case k_MemoryImm: + assert(getMemOff() != nullptr); + OS << "Mem: " << getMemBase() << "+" << *getMemOff() << "\n"; + break; + } + } + + void addRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createReg(getReg())); + } + + void addImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + const MCExpr *Expr = getImm(); + addExpr(Inst, Expr); + } + + void addExpr(MCInst &Inst, const MCExpr *Expr) const { + // Add as immediate when possible. Null MCExpr = 0. + if (!Expr) + Inst.addOperand(MCOperand::createImm(0)); + else if (const MCConstantExpr *CE = dyn_cast(Expr)) + Inst.addOperand(MCOperand::createImm(CE->getValue())); + else + Inst.addOperand(MCOperand::createExpr(Expr)); + } + + void addMEMrrOperands(MCInst &Inst, unsigned N) const { + assert(N == 2 && "Invalid number of operands!"); + + Inst.addOperand(MCOperand::createReg(getMemBase())); + + assert(getMemOffsetReg() != 0 && "Invalid offset"); + Inst.addOperand(MCOperand::createReg(getMemOffsetReg())); + } + + void addMEMriOperands(MCInst &Inst, unsigned N) const { + assert(N == 2 && "Invalid number of operands!"); + + Inst.addOperand(MCOperand::createReg(getMemBase())); + + const MCExpr *Expr = getMemOff(); + addExpr(Inst, Expr); + } + + static std::unique_ptr CreateToken(StringRef Str, SMLoc S) { + auto Op = std::make_unique(k_Token); + Op->Tok.Data = Str.data(); + Op->Tok.Length = Str.size(); + Op->StartLoc = S; + Op->EndLoc = S; + return Op; + } + + static std::unique_ptr CreateReg(unsigned RegNum, unsigned Kind, + SMLoc S, SMLoc E) { + auto Op = std::make_unique(k_Register); + Op->Reg.RegNum = RegNum; + Op->Reg.Kind = (VEOperand::RegisterKind)Kind; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static std::unique_ptr CreateImm(const MCExpr *Val, SMLoc S, + SMLoc E) { + auto Op = std::make_unique(k_Immediate); + Op->Imm.Val = Val; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static std::unique_ptr + MorphToMEMrr(unsigned Base, std::unique_ptr Op) { + unsigned offsetReg = Op->getReg(); + Op->Kind = k_MemoryReg; + Op->Mem.Base = Base; + Op->Mem.OffsetReg = offsetReg; + Op->Mem.Off = nullptr; + return Op; + } + + static std::unique_ptr CreateMEMr(unsigned Base, SMLoc S, + SMLoc E) { + auto Op = std::make_unique(k_MemoryReg); + Op->Mem.Base = Base; + Op->Mem.OffsetReg = 0; // always 0 + Op->Mem.Off = nullptr; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static std::unique_ptr + MorphToMEMri(unsigned Base, std::unique_ptr Op) { + const MCExpr *Imm = Op->getImm(); + Op->Kind = k_MemoryImm; + Op->Mem.Base = Base; + Op->Mem.OffsetReg = 0; + Op->Mem.Off = Imm; + return Op; + } +}; + +} // end anonymous namespace 
+ +bool VEAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, + OperandVector &Operands, + MCStreamer &Out, uint64_t &ErrorInfo, + bool MatchingInlineAsm) { + MCInst Inst; + SmallVector Instructions; + unsigned MatchResult = + MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm); + switch (MatchResult) { + case Match_Success: { + for (const MCInst &I : Instructions) { + Out.EmitInstruction(I, getSTI()); + } + return false; + } + + case Match_MissingFeature: + return Error(IDLoc, + "instruction requires a CPU feature not currently enabled"); + + case Match_InvalidOperand: { + SMLoc ErrorLoc = IDLoc; + if (ErrorInfo != ~0ULL) { + if (ErrorInfo >= Operands.size()) + return Error(IDLoc, "too few operands for instruction"); + + ErrorLoc = ((VEOperand &)*Operands[ErrorInfo]).getStartLoc(); + if (ErrorLoc == SMLoc()) + ErrorLoc = IDLoc; + } + + return Error(ErrorLoc, "invalid operand for instruction"); + } + case Match_MnemonicFail: + return Error(IDLoc, "invalid instruction mnemonic"); + } + llvm_unreachable("Implement any new match types added!"); +} + +bool VEAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, + SMLoc &EndLoc) { + const AsmToken &Tok = Parser.getTok(); + StartLoc = Tok.getLoc(); + EndLoc = Tok.getEndLoc(); + RegNo = 0; + if (getLexer().getKind() != AsmToken::Percent) + return false; + Parser.Lex(); + unsigned regKind = VEOperand::rk_None; + if (matchRegisterName(Tok, RegNo, regKind)) { + Parser.Lex(); + return false; + } + + return Error(StartLoc, "invalid register name"); +} + +bool VEAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) { + + // First operand in MCInst is instruction mnemonic. + Operands.push_back(VEOperand::CreateToken(Name, NameLoc)); + + if (getLexer().isNot(AsmToken::EndOfStatement)) { + // Read the first operand. + if (getLexer().is(AsmToken::Comma)) { + if (parseBranchModifiers(Operands) != MatchOperand_Success) { + SMLoc Loc = getLexer().getLoc(); + return Error(Loc, "unexpected token"); + } + } + if (parseOperand(Operands, Name) != MatchOperand_Success) { + SMLoc Loc = getLexer().getLoc(); + return Error(Loc, "unexpected token"); + } + + while (getLexer().is(AsmToken::Comma) || getLexer().is(AsmToken::Plus)) { + if (getLexer().is(AsmToken::Plus)) { + // Plus tokens are significant in software_traps (p83, sparcv8.pdf). We + // must capture them. + Operands.push_back( + VEOperand::CreateToken("+", Parser.getTok().getLoc())); + } + Parser.Lex(); // Eat the comma or plus. + // Parse and remember the operand. + if (parseOperand(Operands, Name) != MatchOperand_Success) { + SMLoc Loc = getLexer().getLoc(); + return Error(Loc, "unexpected token"); + } + } + } + if (getLexer().isNot(AsmToken::EndOfStatement)) { + SMLoc Loc = getLexer().getLoc(); + return Error(Loc, "unexpected token"); + } + Parser.Lex(); // Consume the EndOfStatement. + return false; +} + +bool VEAsmParser::ParseDirective(AsmToken DirectiveID) { + StringRef IDVal = DirectiveID.getString(); + + if (IDVal == ".byte") + return parseDirectiveWord(1, DirectiveID.getLoc()); + + if (IDVal == ".half") + return parseDirectiveWord(2, DirectiveID.getLoc()); + + if (IDVal == ".word") + return parseDirectiveWord(4, DirectiveID.getLoc()); + + if (IDVal == ".nword") + return parseDirectiveWord(is64Bit() ? 8 : 4, DirectiveID.getLoc()); + + if (is64Bit() && IDVal == ".xword") + return parseDirectiveWord(8, DirectiveID.getLoc()); + + if (IDVal == ".register") { + // For now, ignore .register directive. 
+ Parser.eatToEndOfStatement(); + return false; + } + if (IDVal == ".proc") { + // For compatibility, ignore this directive. + // (It's supposed to be an "optimization" in the Sun assembler) + Parser.eatToEndOfStatement(); + return false; + } + + // Let the MC layer to handle other directives. + return true; +} + +bool VEAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { + if (getLexer().isNot(AsmToken::EndOfStatement)) { + while (true) { + const MCExpr *Value; + if (getParser().parseExpression(Value)) + return true; + + getParser().getStreamer().EmitValue(Value, Size); + + if (getLexer().is(AsmToken::EndOfStatement)) + break; + + // FIXME: Improve diagnostic. + if (getLexer().isNot(AsmToken::Comma)) + return Error(L, "unexpected token in directive"); + Parser.Lex(); + } + } + Parser.Lex(); + return false; +} + +OperandMatchResultTy VEAsmParser::parseMEMOperand(OperandVector &Operands) { + SMLoc S, E; + unsigned BaseReg = 0; + + if (ParseRegister(BaseReg, S, E)) { + return MatchOperand_NoMatch; + } + + switch (getLexer().getKind()) { + default: + return MatchOperand_NoMatch; + + case AsmToken::Comma: + case AsmToken::RBrac: + case AsmToken::EndOfStatement: + Operands.push_back(VEOperand::CreateMEMr(BaseReg, S, E)); + return MatchOperand_Success; + + case AsmToken::Plus: + Parser.Lex(); // Eat the '+' + break; + case AsmToken::Minus: + break; + } + + std::unique_ptr Offset; + OperandMatchResultTy ResTy = parseVEAsmOperand(Offset); + if (ResTy != MatchOperand_Success || !Offset) + return MatchOperand_NoMatch; + + Operands.push_back(Offset->isImm() + ? VEOperand::MorphToMEMri(BaseReg, std::move(Offset)) + : VEOperand::MorphToMEMrr(BaseReg, std::move(Offset))); + + return MatchOperand_Success; +} + +OperandMatchResultTy VEAsmParser::parseOperand(OperandVector &Operands, + StringRef Mnemonic) { + + OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); + + // If there wasn't a custom match, try the generic matcher below. Otherwise, + // there was a match, but an error occurred, in which case, just return that + // the operand parsing failed. + if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail) + return ResTy; + + if (getLexer().is(AsmToken::LBrac)) { + // Memory operand + Operands.push_back(VEOperand::CreateToken("[", Parser.getTok().getLoc())); + Parser.Lex(); // Eat the [ + + if (Mnemonic == "cas" || Mnemonic == "casx" || Mnemonic == "casa") { + SMLoc S = Parser.getTok().getLoc(); + if (getLexer().getKind() != AsmToken::Percent) + return MatchOperand_NoMatch; + Parser.Lex(); // eat % + + unsigned RegNo, RegKind; + if (!matchRegisterName(Parser.getTok(), RegNo, RegKind)) + return MatchOperand_NoMatch; + + Parser.Lex(); // Eat the identifier token. + SMLoc E = + SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + Operands.push_back(VEOperand::CreateReg(RegNo, RegKind, S, E)); + ResTy = MatchOperand_Success; + } else { + ResTy = parseMEMOperand(Operands); + } + + if (ResTy != MatchOperand_Success) + return ResTy; + + if (!getLexer().is(AsmToken::RBrac)) + return MatchOperand_ParseFail; + + Operands.push_back(VEOperand::CreateToken("]", Parser.getTok().getLoc())); + Parser.Lex(); // Eat the ] + + // Parse an optional address-space identifier after the address. 
+ if (getLexer().is(AsmToken::Integer)) { + std::unique_ptr Op; + ResTy = parseVEAsmOperand(Op, false); + if (ResTy != MatchOperand_Success || !Op) + return MatchOperand_ParseFail; + Operands.push_back(std::move(Op)); + } + return MatchOperand_Success; + } + + std::unique_ptr Op; + + ResTy = parseVEAsmOperand(Op, (Mnemonic == "call")); + if (ResTy != MatchOperand_Success || !Op) + return MatchOperand_ParseFail; + + // Push the parsed operand into the list of operands + Operands.push_back(std::move(Op)); + + return MatchOperand_Success; +} + +OperandMatchResultTy +VEAsmParser::parseVEAsmOperand(std::unique_ptr &Op, bool isCall) { + SMLoc S = Parser.getTok().getLoc(); + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + const MCExpr *EVal; + + Op = nullptr; + switch (getLexer().getKind()) { + default: + break; + + case AsmToken::Percent: + Parser.Lex(); // Eat the '%'. + unsigned RegNo; + unsigned RegKind; + if (matchRegisterName(Parser.getTok(), RegNo, RegKind)) { + Parser.Lex(); // Eat the identifier token. + E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + switch (RegNo) { + default: + Op = VEOperand::CreateReg(RegNo, RegKind, S, E); + break; + } + break; + } + if (matchVEAsmModifiers(EVal, E)) { + E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + Op = VEOperand::CreateImm(EVal, S, E); + } + break; + + case AsmToken::Minus: + case AsmToken::Integer: + case AsmToken::LParen: + case AsmToken::Dot: + if (!getParser().parseExpression(EVal, E)) + Op = VEOperand::CreateImm(EVal, S, E); + break; + + case AsmToken::Identifier: { + StringRef Identifier; + if (!getParser().parseIdentifier(Identifier)) { + E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier); + + const MCExpr *Res = + MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext()); + Op = VEOperand::CreateImm(Res, S, E); + } + break; + } + } + return (Op) ? MatchOperand_Success : MatchOperand_ParseFail; +} + +OperandMatchResultTy +VEAsmParser::parseBranchModifiers(OperandVector &Operands) { + // parse (,a|,pn|,pt)+ + + while (getLexer().is(AsmToken::Comma)) { + Parser.Lex(); // Eat the comma + + if (!getLexer().is(AsmToken::Identifier)) + return MatchOperand_ParseFail; + StringRef modName = Parser.getTok().getString(); + if (modName == "a" || modName == "pn" || modName == "pt") { + Operands.push_back( + VEOperand::CreateToken(modName, Parser.getTok().getLoc())); + Parser.Lex(); // eat the identifier. + } + } + return MatchOperand_Success; +} + +bool VEAsmParser::matchRegisterName(const AsmToken &Tok, unsigned &RegNo, + unsigned &RegKind) { + int64_t intVal = 0; + RegNo = 0; + RegKind = VEOperand::rk_None; + if (Tok.is(AsmToken::Identifier)) { + StringRef Name = Tok.getString(); + + // %fp + if (Name.equals("fp")) { + RegNo = VE::SX9; + RegKind = VEOperand::rk_IntReg; + return true; + } + // %sp + if (Name.equals("sp")) { + RegNo = VE::SX11; + RegKind = VEOperand::rk_IntReg; + return true; + } + + // %s0 - %s63 + if (Name.substr(0, 1).equals_lower("s") && + !Name.substr(1).getAsInteger(10, intVal) && intVal < 64) { + RegNo = IntRegs[intVal]; + RegKind = VEOperand::rk_IntReg; + return true; + } + + if (Name.equals("usrcc")) { + RegNo = VE::UCC; + RegKind = VEOperand::rk_Special; + return true; + } + } + return false; +} + +// Determine if an expression contains a reference to the symbol +// "_GLOBAL_OFFSET_TABLE_". 
+static bool hasGOTReference(const MCExpr *Expr) { + switch (Expr->getKind()) { + case MCExpr::Target: + if (const VEMCExpr *SE = dyn_cast<VEMCExpr>(Expr)) + return hasGOTReference(SE->getSubExpr()); + break; + + case MCExpr::Constant: + break; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr); + return hasGOTReference(BE->getLHS()) || hasGOTReference(BE->getRHS()); + } + + case MCExpr::SymbolRef: { + const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr); + return (SymRef.getSymbol().getName() == "_GLOBAL_OFFSET_TABLE_"); + } + + case MCExpr::Unary: + return hasGOTReference(cast<MCUnaryExpr>(Expr)->getSubExpr()); + } + return false; +} + +const VEMCExpr *VEAsmParser::adjustPICRelocation(VEMCExpr::VariantKind VK, + const MCExpr *subExpr) { + // In PIC mode, the "hi" and "lo" modifiers behave differently. + // If the expression contains _GLOBAL_OFFSET_TABLE_, it is + // actually a pc_hi/pc_lo relocation. Otherwise, it is interpreted + // as a got_hi/got_lo relocation. + + if (getContext().getObjectFileInfo()->isPositionIndependent()) { + switch (VK) { + default: + break; + case VEMCExpr::VK_VE_LO32: + VK = (hasGOTReference(subExpr) ? VEMCExpr::VK_VE_PC_LO32 + : VEMCExpr::VK_VE_GOT_LO32); + break; + case VEMCExpr::VK_VE_HI32: + VK = (hasGOTReference(subExpr) ? VEMCExpr::VK_VE_PC_HI32 + : VEMCExpr::VK_VE_GOT_HI32); + break; + } + } + + return VEMCExpr::create(VK, subExpr, getContext()); +} + +bool VEAsmParser::matchVEAsmModifiers(const MCExpr *&EVal, SMLoc &EndLoc) { + AsmToken Tok = Parser.getTok(); + if (!Tok.is(AsmToken::Identifier)) + return false; + + StringRef Name = Tok.getString(); + + VEMCExpr::VariantKind VK = VEMCExpr::parseVariantKind(Name); + + if (VK == VEMCExpr::VK_VE_None) + return false; + + Parser.Lex(); // Eat the identifier. + if (Parser.getTok().getKind() != AsmToken::LParen) + return false; + + Parser.Lex(); // Eat the LParen token. 
+ const MCExpr *subExpr; + if (Parser.parseParenExpression(subExpr, EndLoc)) + return false; + + EVal = adjustPICRelocation(VK, subExpr); + return true; +} + +extern "C" void LLVMInitializeVEAsmParser() { + RegisterMCAsmParser A(getTheVETarget()); +} + +#define GET_REGISTER_MATCHER +#define GET_MATCHER_IMPLEMENTATION +#include "VEGenAsmMatcher.inc" + +unsigned VEAsmParser::validateTargetOperandClass(MCParsedAsmOperand &GOp, + unsigned Kind) { + VEOperand &Op = (VEOperand &)GOp; + if (Op.isFloatOrDoubleReg()) { + switch (Kind) { + default: + break; + } + } + return Match_InvalidOperand; +} diff --git a/llvm/lib/Target/VE/CMakeLists.txt b/llvm/lib/Target/VE/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/CMakeLists.txt @@ -0,0 +1,31 @@ +set(LLVM_TARGET_DEFINITIONS VE.td) + +tablegen(LLVM VEGenRegisterInfo.inc -gen-register-info) +tablegen(LLVM VEGenInstrInfo.inc -gen-instr-info) +tablegen(LLVM VEGenDisassemblerTables.inc -gen-disassembler) +tablegen(LLVM VEGenMCCodeEmitter.inc -gen-emitter) +tablegen(LLVM VEGenAsmWriter.inc -gen-asm-writer) +tablegen(LLVM VEGenAsmMatcher.inc -gen-asm-matcher) +tablegen(LLVM VEGenDAGISel.inc -gen-dag-isel) +tablegen(LLVM VEGenSubtargetInfo.inc -gen-subtarget) +tablegen(LLVM VEGenCallingConv.inc -gen-callingconv) +add_public_tablegen_target(VECommonTableGen) + +add_llvm_target(VECodeGen + LVLGen.cpp + VEAsmPrinter.cpp + VEFrameLowering.cpp + VEISelDAGToDAG.cpp + VEISelLowering.cpp + VEInstrInfo.cpp + VEMachineFunctionInfo.cpp + VEMCInstLower.cpp + VERegisterInfo.cpp + VESubtarget.cpp + VETargetMachine.cpp + ) + +add_subdirectory(AsmParser) +add_subdirectory(InstPrinter) +add_subdirectory(TargetInfo) +add_subdirectory(MCTargetDesc) diff --git a/llvm/lib/Target/VE/InstPrinter/CMakeLists.txt b/llvm/lib/Target/VE/InstPrinter/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/InstPrinter/CMakeLists.txt @@ -0,0 +1,3 @@ +add_llvm_component_library(LLVMVEAsmPrinter + VEInstPrinter.cpp + ) diff --git a/llvm/lib/Target/VE/InstPrinter/LLVMBuild.txt b/llvm/lib/Target/VE/InstPrinter/LLVMBuild.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/InstPrinter/LLVMBuild.txt @@ -0,0 +1,22 @@ +;===- ./lib/Target/VE/InstPrinter/LLVMBuild.txt ----------------*- Conf -*--===; +; +; Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +; See https://llvm.org/LICENSE.txt for license information. +; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = VEAsmPrinter +parent = VE +required_libraries = MC Support +add_to_library_groups = VE diff --git a/llvm/lib/Target/VE/InstPrinter/VEInstPrinter.h b/llvm/lib/Target/VE/InstPrinter/VEInstPrinter.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/InstPrinter/VEInstPrinter.h @@ -0,0 +1,57 @@ +//===-- VEInstPrinter.h - Convert VE MCInst to assembly syntax ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class prints an VE MCInst to a .s file. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_INSTPRINTER_VEINSTPRINTER_H +#define LLVM_LIB_TARGET_VE_INSTPRINTER_VEINSTPRINTER_H + +#include "llvm/MC/MCInstPrinter.h" + +namespace llvm { + +class VEInstPrinter : public MCInstPrinter { +public: + VEInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, + const MCRegisterInfo &MRI) + : MCInstPrinter(MAI, MII, MRI) {} + + void printRegName(raw_ostream &OS, unsigned RegNo) const override; + void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot, + const MCSubtargetInfo &STI) override; + bool printVEAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI, + raw_ostream &OS); + + // Autogenerated by tblgen. + void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI, + raw_ostream &O); + bool printAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI, + raw_ostream &O); + void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx, + unsigned PrintMethodIdx, + const MCSubtargetInfo &STI, raw_ostream &O); + static const char *getRegisterName(unsigned RegNo); + + void printOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI, + raw_ostream &OS); + void printMemASXOperand(const MCInst *MI, int opNum, + const MCSubtargetInfo &STI, raw_ostream &OS, + const char *Modifier = nullptr); + void printMemASOperand(const MCInst *MI, int opNum, + const MCSubtargetInfo &STI, raw_ostream &OS, + const char *Modifier = nullptr); + void printCCOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI, + raw_ostream &OS); + bool printGetGOT(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, + raw_ostream &OS); +}; +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/InstPrinter/VEInstPrinter.cpp b/llvm/lib/Target/VE/InstPrinter/VEInstPrinter.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/InstPrinter/VEInstPrinter.cpp @@ -0,0 +1,132 @@ +//===-- VEInstPrinter.cpp - Convert VE MCInst to assembly syntax -----------==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This class prints an VE MCInst to a .s file. +// +//===----------------------------------------------------------------------===// + +#include "VEInstPrinter.h" +#include "VE.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +#define DEBUG_TYPE "asm-printer" + +// The generated AsmMatcher VEGenAsmWriter uses "VE" as the target +// namespace. But SPARC backend uses "SP" as its namespace. 
+namespace llvm { +namespace VE { +using namespace VE; +} +} // namespace llvm + +#define GET_INSTRUCTION_NAME +#define PRINT_ALIAS_INSTR +#include "VEGenAsmWriter.inc" + +void VEInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const { + OS << '%' << StringRef(getRegisterName(RegNo)).lower(); +} + +void VEInstPrinter::printInst(const MCInst *MI, raw_ostream &O, StringRef Annot, + const MCSubtargetInfo &STI) { + if (!printAliasInstr(MI, STI, O) && !printVEAliasInstr(MI, STI, O)) + printInstruction(MI, STI, O); + printAnnotation(O, Annot); +} + +bool VEInstPrinter::printVEAliasInstr(const MCInst *MI, + const MCSubtargetInfo &STI, + raw_ostream &O) { + switch (MI->getOpcode()) { + default: + return false; + } +} + +void VEInstPrinter::printOperand(const MCInst *MI, int opNum, + const MCSubtargetInfo &STI, raw_ostream &O) { + const MCOperand &MO = MI->getOperand(opNum); + + if (MO.isReg()) { + printRegName(O, MO.getReg()); + return; + } + + if (MO.isImm()) { + switch (MI->getOpcode()) { + default: + O << (int)MO.getImm(); + return; + } + } + + assert(MO.isExpr() && "Unknown operand kind in printOperand"); + MO.getExpr()->print(O, &MAI); +} + +void VEInstPrinter::printMemASXOperand(const MCInst *MI, int opNum, + const MCSubtargetInfo &STI, + raw_ostream &O, const char *Modifier) { + // If this is an ADD operand, emit it like normal operands. + if (Modifier && !strcmp(Modifier, "arith")) { + printOperand(MI, opNum, STI, O); + O << ", "; + printOperand(MI, opNum + 1, STI, O); + return; + } + + const MCOperand &MO = MI->getOperand(opNum + 1); + if (MO.isImm() && MO.getImm() == 0) { + // don't print "+0" + } else { + printOperand(MI, opNum + 1, STI, O); + } + O << "(,"; + printOperand(MI, opNum, STI, O); + O << ")"; +} + +void VEInstPrinter::printMemASOperand(const MCInst *MI, int opNum, + const MCSubtargetInfo &STI, + raw_ostream &O, const char *Modifier) { + // If this is an ADD operand, emit it like normal operands. + if (Modifier && !strcmp(Modifier, "arith")) { + printOperand(MI, opNum, STI, O); + O << ", "; + printOperand(MI, opNum + 1, STI, O); + return; + } + + const MCOperand &MO = MI->getOperand(opNum + 1); + if (MO.isImm() && MO.getImm() == 0) { + // don't print "+0" + } else { + printOperand(MI, opNum + 1, STI, O); + } + O << "("; + printOperand(MI, opNum, STI, O); + O << ")"; +} + +void VEInstPrinter::printCCOperand(const MCInst *MI, int opNum, + const MCSubtargetInfo &STI, raw_ostream &O) { + int CC = (int)MI->getOperand(opNum).getImm(); + O << VECondCodeToString((VECC::CondCodes)CC); +} + +bool VEInstPrinter::printGetGOT(const MCInst *MI, unsigned opNum, + const MCSubtargetInfo &STI, raw_ostream &O) { + llvm_unreachable("FIXME: Implement VEInstPrinter::printGetGOT."); + return true; +} diff --git a/llvm/lib/Target/VE/LLVMBuild.txt b/llvm/lib/Target/VE/LLVMBuild.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/LLVMBuild.txt @@ -0,0 +1,34 @@ +;===- ./lib/Target/VE/LLVMBuild.txt ----------------------------*- Conf -*--===; +; +; Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +; See https://llvm.org/LICENSE.txt for license information. +; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. 
+; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[common] +subdirectories = AsmParser InstPrinter MCTargetDesc TargetInfo + +[component_0] +type = TargetGroup +name = VE +parent = Target +has_asmparser = 1 +has_asmprinter = 1 + +[component_1] +type = Library +name = VECodeGen +parent = VE +required_libraries = Analysis AsmPrinter CodeGen Core + MC SelectionDAG VEAsmPrinter + VEDesc VEInfo Support Target +add_to_library_groups = VE diff --git a/llvm/lib/Target/VE/LVLGen.cpp b/llvm/lib/Target/VE/LVLGen.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/LVLGen.cpp @@ -0,0 +1,124 @@ +//===-- LVLGen.cpp - LVL instruction generator ----------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "VE.h" +#include "VESubtarget.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "lvl-gen" + +namespace { + int getVLIndex(unsigned op) { + switch (op) { +#include "vl-index.inc" + case VE::vor_v1vl: return 3; + } + } + + struct LVLGen : public MachineFunctionPass { + MCInstrDesc LVLInstDesc; + static char ID; + LVLGen() : MachineFunctionPass(ID) {} + bool runOnMachineBasicBlock(MachineBasicBlock &MBB); + bool runOnMachineFunction(MachineFunction &F) override; + }; + char LVLGen::ID = 0; + + // returns a register holding a vector length. NoRegister is returned when + // this MI does not have a vector length. + unsigned getVL(const MachineInstr &MI) + { + int index = getVLIndex(MI.getOpcode()); + if (index >= 0) + return MI.getOperand(index).getReg(); + + return VE::NoRegister; + } + +} // end of anonymous namespace + +FunctionPass *llvm::createLVLGenPass() { + return new LVLGen; +} + +bool LVLGen::runOnMachineBasicBlock(MachineBasicBlock &MBB) +{ +#define RegName(no) \ + (MBB.getParent()->getSubtarget().getRegisterInfo()->getName(no)) + + bool Changed = false; + bool hasRegForVL = false; + unsigned RegForVL; + + for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) { + MachineBasicBlock::iterator MI = I; + + unsigned Reg = getVL(*MI); + if (Reg != VE::NoRegister) { + LLVM_DEBUG(dbgs() << "Vector instruction found: "); + LLVM_DEBUG(MI->dump()); + LLVM_DEBUG(dbgs() << "Vector length is " << RegName(Reg) << ". "); + LLVM_DEBUG(dbgs() << "Current VL is " + << (hasRegForVL ? RegName(RegForVL) : "unknown") << ". 
"); + + if (!hasRegForVL || RegForVL != Reg) { + LLVM_DEBUG(dbgs() << "Generate a LVL instruction to load " + << RegName(Reg) << ".\n"); + BuildMI(MBB, I, MI->getDebugLoc(), LVLInstDesc).addReg(Reg); + hasRegForVL = true; + RegForVL = Reg; + Changed = true; + } else { + LLVM_DEBUG(dbgs() << "Reuse current VL.\n"); + } + } else if (hasRegForVL) { + for (const MachineOperand &MO : MI->defs()) { + if (MO.isReg() && MO.getReg() == RegForVL) { + LLVM_DEBUG(dbgs() << RegName(RegForVL) << " is killed: "); + LLVM_DEBUG(MI->dump()); + hasRegForVL = false; + break; + } + } + } + + ++I; + } + return Changed; +} + +bool LVLGen::runOnMachineFunction(MachineFunction &F) +{ + LLVM_DEBUG(dbgs() << "********** Begin LVLGen **********\n"); + LLVM_DEBUG(dbgs() << "********** Function: " << F.getName() << '\n'); + LLVM_DEBUG(F.dump()); + + bool Changed = false; + + const VESubtarget& Subtarget = F.getSubtarget(); + const TargetInstrInfo *TII = Subtarget.getInstrInfo(); + LVLInstDesc = TII->get(VE::LVL); + + for (MachineFunction::iterator FI = F.begin(), FE = F.end(); + FI != FE; ++FI) + Changed |= runOnMachineBasicBlock(*FI); + + if (Changed) { + LLVM_DEBUG(dbgs() << "\n"); + LLVM_DEBUG(F.dump()); + } + LLVM_DEBUG(dbgs() << "********** End LVLGen **********\n"); + return Changed; +} + diff --git a/llvm/lib/Target/VE/MCTargetDesc/CMakeLists.txt b/llvm/lib/Target/VE/MCTargetDesc/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/CMakeLists.txt @@ -0,0 +1,6 @@ +add_llvm_component_library(LLVMVEDesc + VEMCAsmInfo.cpp + VEMCExpr.cpp + VEMCTargetDesc.cpp + VETargetStreamer.cpp + ) diff --git a/llvm/lib/Target/VE/MCTargetDesc/LLVMBuild.txt b/llvm/lib/Target/VE/MCTargetDesc/LLVMBuild.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/LLVMBuild.txt @@ -0,0 +1,22 @@ +;===- ./lib/Target/VE/MCTargetDesc/LLVMBuild.txt ---------------*- Conf -*--===; +; +; Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +; See https://llvm.org/LICENSE.txt for license information. +; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = VEDesc +parent = VE +required_libraries = MC VEAsmPrinter VEInfo Support +add_to_library_groups = VE diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEFixupKinds.h b/llvm/lib/Target/VE/MCTargetDesc/VEFixupKinds.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VEFixupKinds.h @@ -0,0 +1,73 @@ +//===-- VEFixupKinds.h - VE Specific Fixup Entries --------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_MCTARGETDESC_VEFIXUPKINDS_H +#define LLVM_LIB_TARGET_VE_MCTARGETDESC_VEFIXUPKINDS_H + +#include "llvm/MC/MCFixup.h" + +namespace llvm { +namespace VE { +enum Fixups { + // fixup_ve_call30 - 30-bit PC relative relocation for call + fixup_ve_call30 = FirstTargetFixupKind, + + /// fixup_ve_br22 - 22-bit PC relative relocation for + /// branches + fixup_ve_br22, + + /// fixup_ve_br19 - 19-bit PC relative relocation for + /// branches on icc/xcc + fixup_ve_br19, + + /// fixup_ve_bpr - 16-bit fixup for bpr + fixup_ve_br16_2, + fixup_ve_br16_14, + + /// fixup_ve_hi32 - 32-bit fixup corresponding to foo@hi + fixup_ve_hi32, + + /// fixup_ve_lo32 - 32-bit fixup corresponding to foo@lo + fixup_ve_lo32, + + /// fixup_ve_pc_hi32 - 32-bit fixup corresponding to foo@pc_hi + fixup_ve_pc_hi32, + + /// fixup_ve_pc_lo32 - 32-bit fixup corresponding to foo@pc_lo + fixup_ve_pc_lo32, + + /// fixup_ve_got_hi32 - 32-bit fixup corresponding to foo@got_hi + fixup_ve_got_hi32, + + /// fixup_ve_got_lo32 - 32-bit fixup corresponding to foo@got_lo + fixup_ve_got_lo32, + + /// fixup_ve_gotoff_hi32 - 32-bit fixup corresponding to foo@gotoff_hi + fixup_ve_gotoff_hi32, + + /// fixup_ve_gotoff_lo32 - 32-bit fixup corresponding to foo@gotoff_lo + fixup_ve_gotoff_lo32, + + /// fixup_ve_plt_hi32/lo32 + fixup_ve_plt_hi32, + fixup_ve_plt_lo32, + + /// fixups for Thread Local Storage + fixup_ve_tls_gd_hi32, + fixup_ve_tls_gd_lo32, + fixup_ve_tpoff_hi32, + fixup_ve_tpoff_lo32, + + // Marker + LastTargetFixupKind, + NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind +}; +} // namespace VE +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEMCAsmInfo.h b/llvm/lib/Target/VE/MCTargetDesc/VEMCAsmInfo.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VEMCAsmInfo.h @@ -0,0 +1,37 @@ +//===- VEMCAsmInfo.h - VE asm properties -----------------------*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the VEMCAsmInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_MCTARGETDESC_VEMCASMINFO_H +#define LLVM_LIB_TARGET_VE_MCTARGETDESC_VEMCASMINFO_H + +#include "llvm/MC/MCAsmInfoELF.h" + +namespace llvm { + +class Triple; + +class VEELFMCAsmInfo : public MCAsmInfoELF { + void anchor() override; + +public: + explicit VEELFMCAsmInfo(const Triple &TheTriple); + + const MCExpr * + getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding, + MCStreamer &Streamer) const override; + const MCExpr *getExprForFDESymbol(const MCSymbol *Sym, unsigned Encoding, + MCStreamer &Streamer) const override; +}; + +} // namespace llvm + +#endif // LLVM_LIB_TARGET_VE_MCTARGETDESC_VEMCASMINFO_H diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEMCAsmInfo.cpp b/llvm/lib/Target/VE/MCTargetDesc/VEMCAsmInfo.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VEMCAsmInfo.cpp @@ -0,0 +1,69 @@ +//===- VEMCAsmInfo.cpp - VE asm properties --------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations of the VEMCAsmInfo properties. +// +//===----------------------------------------------------------------------===// + +#include "VEMCAsmInfo.h" +#include "VEMCExpr.h" +#include "llvm/ADT/Triple.h" +#include "llvm/BinaryFormat/Dwarf.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCTargetOptions.h" + +using namespace llvm; + +void VEELFMCAsmInfo::anchor() {} + +VEELFMCAsmInfo::VEELFMCAsmInfo(const Triple &TheTriple) { + + CodePointerSize = CalleeSaveStackSlotSize = 8; + MaxInstLength = MinInstAlignment = 8; + + // VE has a ".zero" directive although it is not listed in the assembler + // manual. + // ZeroDirective = nullptr; + + // VE uses ".*byte" directives for unaligned data. + Data8bitsDirective = "\t.byte\t"; + Data16bitsDirective = "\t.2byte\t"; + Data32bitsDirective = "\t.4byte\t"; + Data64bitsDirective = "\t.8byte\t"; + + // Uses '.section' before the '.bss' directive. VE requires this although + // the assembler manual says simple '.bss' is supported. 
+ UsesELFSectionDirectiveForBSS = true; + + // ExceptionsType = ExceptionHandling::DwarfCFI; + SupportsDebugInformation = true; + // SunStyleELFSectionSwitchSyntax = true; + // UseIntegratedAssembler = true; +} + +const MCExpr *VEELFMCAsmInfo::getExprForPersonalitySymbol( + const MCSymbol *Sym, unsigned Encoding, MCStreamer &Streamer) const { + if (Encoding & dwarf::DW_EH_PE_pcrel) { + MCContext &Ctx = Streamer.getContext(); + return VEMCExpr::create(VEMCExpr::VK_VE_R_DISP32, + MCSymbolRefExpr::create(Sym, Ctx), Ctx); + } + + return MCAsmInfo::getExprForPersonalitySymbol(Sym, Encoding, Streamer); +} + +const MCExpr *VEELFMCAsmInfo::getExprForFDESymbol(const MCSymbol *Sym, + unsigned Encoding, + MCStreamer &Streamer) const { + if (Encoding & dwarf::DW_EH_PE_pcrel) { + MCContext &Ctx = Streamer.getContext(); + return VEMCExpr::create(VEMCExpr::VK_VE_R_DISP32, + MCSymbolRefExpr::create(Sym, Ctx), Ctx); + } + return MCAsmInfo::getExprForFDESymbol(Sym, Encoding, Streamer); +} diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEMCExpr.h b/llvm/lib/Target/VE/MCTargetDesc/VEMCExpr.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VEMCExpr.h @@ -0,0 +1,95 @@ +//====- VEMCExpr.h - VE specific MC expression classes --------*- C++ -*-=====// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes VE-specific MCExprs, used for modifiers like +// "%hi" or "%lo" etc., +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_MCTARGETDESC_VEMCEXPR_H +#define LLVM_LIB_TARGET_VE_MCTARGETDESC_VEMCEXPR_H + +#include "VEFixupKinds.h" +#include "llvm/MC/MCExpr.h" + +namespace llvm { + +class StringRef; +class VEMCExpr : public MCTargetExpr { +public: + enum VariantKind { + VK_VE_None, + VK_VE_R_DISP32, + VK_VE_HI32, + VK_VE_LO32, + VK_VE_PC_HI32, + VK_VE_PC_LO32, + VK_VE_GOT_HI32, + VK_VE_GOT_LO32, + VK_VE_GOTOFF_HI32, + VK_VE_GOTOFF_LO32, + VK_VE_PLT_HI32, + VK_VE_PLT_LO32, + VK_VE_TLS_GD_HI32, + VK_VE_TLS_GD_LO32, + VK_VE_TPOFF_HI32, + VK_VE_TPOFF_LO32, + }; + +private: + const VariantKind Kind; + const MCExpr *Expr; + + explicit VEMCExpr(VariantKind Kind, const MCExpr *Expr) + : Kind(Kind), Expr(Expr) {} + +public: + /// @name Construction + /// @{ + + static const VEMCExpr *create(VariantKind Kind, const MCExpr *Expr, + MCContext &Ctx); + /// @} + /// @name Accessors + /// @{ + + /// getOpcode - Get the kind of this expression. + VariantKind getKind() const { return Kind; } + + /// getSubExpr - Get the child of this expression. + const MCExpr *getSubExpr() const { return Expr; } + + /// getFixupKind - Get the fixup kind of this expression. 
+ VE::Fixups getFixupKind() const { return getFixupKind(Kind); } + + /// @} + void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override; + bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout, + const MCFixup *Fixup) const override; + void visitUsedExpr(MCStreamer &Streamer) const override; + MCFragment *findAssociatedFragment() const override { + return getSubExpr()->findAssociatedFragment(); + } + + void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override; + + static bool classof(const MCExpr *E) { + return E->getKind() == MCExpr::Target; + } + + static bool classof(const VEMCExpr *) { return true; } + + static VariantKind parseVariantKind(StringRef name); + static bool printVariantKind(raw_ostream &OS, VariantKind Kind); + static void printVariantKindSuffix(raw_ostream &OS, VariantKind Kind); + static VE::Fixups getFixupKind(VariantKind Kind); +}; + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEMCExpr.cpp b/llvm/lib/Target/VE/MCTargetDesc/VEMCExpr.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VEMCExpr.cpp @@ -0,0 +1,229 @@ +//===-- VEMCExpr.cpp - VE specific MC expression classes ------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation of the assembly expression modifiers +// accepted by the VE architecture (e.g. "%hi", "%lo", ...). +// +//===----------------------------------------------------------------------===// + +#include "VEMCExpr.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCObjectStreamer.h" +#include "llvm/MC/MCSymbolELF.h" +#include "llvm/Object/ELF.h" + +using namespace llvm; + +#define DEBUG_TYPE "vemcexpr" + +const VEMCExpr *VEMCExpr::create(VariantKind Kind, const MCExpr *Expr, + MCContext &Ctx) { + return new (Ctx) VEMCExpr(Kind, Expr); +} + +void VEMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { + + bool closeParen = printVariantKind(OS, Kind); + + const MCExpr *Expr = getSubExpr(); + Expr->print(OS, MAI); + + if (closeParen) + OS << ')'; + printVariantKindSuffix(OS, Kind); +} + +bool VEMCExpr::printVariantKind(raw_ostream &OS, VariantKind Kind) { + bool closeParen = true; + switch (Kind) { + case VK_VE_None: + closeParen = false; + break; + case VK_VE_R_DISP32: + OS << "%r_disp32("; + break; + case VK_VE_HI32: + return false; // OS << "%hi("; break; + case VK_VE_LO32: + return false; // OS << "%lo("; break; + case VK_VE_PC_HI32: + return false; // OS << "%pc_hi("; break; + case VK_VE_PC_LO32: + return false; // OS << "%pc_lo("; break; + case VK_VE_GOT_HI32: + return false; // OS << "%got_hi("; break; + case VK_VE_GOT_LO32: + return false; // OS << "%got_lo("; break; + case VK_VE_GOTOFF_HI32: + return false; // OS << "%gotoff_hi("; break; + case VK_VE_GOTOFF_LO32: + return false; // OS << "%gotoff_lo("; break; + case VK_VE_PLT_HI32: + return false; // OS << "%plt_hi("; break; + case VK_VE_PLT_LO32: + return false; // OS << "%plt_lo("; break; + case VK_VE_TLS_GD_HI32: + return false; // OS << "%tls_gd_hi("; break; + case VK_VE_TLS_GD_LO32: + return false; // OS << "%tls_gd_lo("; break; + case VK_VE_TPOFF_HI32: + return false; // OS << "%tpoff_hi("; break; + case VK_VE_TPOFF_LO32: + return false; // OS << 
"%tpoff_lo("; break; + } + return closeParen; +} + +void VEMCExpr::printVariantKindSuffix(raw_ostream &OS, VariantKind Kind) { + switch (Kind) { + case VK_VE_None: + break; + case VK_VE_R_DISP32: + break; + case VK_VE_HI32: + OS << "@hi"; + break; + case VK_VE_LO32: + OS << "@lo"; + break; + case VK_VE_PC_HI32: + OS << "@pc_hi"; + break; + case VK_VE_PC_LO32: + OS << "@pc_lo"; + break; + case VK_VE_GOT_HI32: + OS << "@got_hi"; + break; + case VK_VE_GOT_LO32: + OS << "@got_lo"; + break; + case VK_VE_GOTOFF_HI32: + OS << "@gotoff_hi"; + break; + case VK_VE_GOTOFF_LO32: + OS << "@gotoff_lo"; + break; + case VK_VE_PLT_HI32: + OS << "@plt_hi"; + break; + case VK_VE_PLT_LO32: + OS << "@plt_lo"; + break; + case VK_VE_TLS_GD_HI32: + OS << "@tls_gd_hi"; + break; + case VK_VE_TLS_GD_LO32: + OS << "@tls_gd_lo"; + break; + case VK_VE_TPOFF_HI32: + OS << "@tpoff_hi"; + break; + case VK_VE_TPOFF_LO32: + OS << "@tpoff_lo"; + break; + } +} + +VEMCExpr::VariantKind VEMCExpr::parseVariantKind(StringRef name) { + return StringSwitch(name) + .Case("r_disp32", VK_VE_R_DISP32) + .Case("hi", VK_VE_HI32) + .Case("lo", VK_VE_LO32) + .Case("pc_hi", VK_VE_PC_HI32) + .Case("pc_lo", VK_VE_PC_LO32) + .Case("got_hi", VK_VE_GOT_HI32) + .Case("got_lo", VK_VE_GOT_LO32) + .Case("gotoff_hi", VK_VE_GOTOFF_HI32) + .Case("gotoff_lo", VK_VE_GOTOFF_LO32) + .Case("plt_hi", VK_VE_PLT_HI32) + .Case("plt_lo", VK_VE_PLT_LO32) + .Case("tls_gd_hi", VK_VE_TLS_GD_HI32) + .Case("tls_gd_lo", VK_VE_TLS_GD_LO32) + .Case("tpoff_hi", VK_VE_TPOFF_HI32) + .Case("tpoff_lo", VK_VE_TPOFF_LO32) + .Default(VK_VE_None); +} + +VE::Fixups VEMCExpr::getFixupKind(VEMCExpr::VariantKind Kind) { + switch (Kind) { + default: + llvm_unreachable("Unhandled VEMCExpr::VariantKind"); + case VK_VE_HI32: + return VE::fixup_ve_hi32; + case VK_VE_LO32: + return VE::fixup_ve_lo32; + case VK_VE_PC_HI32: + return VE::fixup_ve_pc_hi32; + case VK_VE_PC_LO32: + return VE::fixup_ve_pc_lo32; + case VK_VE_GOT_HI32: + return VE::fixup_ve_got_hi32; + case VK_VE_GOT_LO32: + return VE::fixup_ve_got_lo32; + case VK_VE_GOTOFF_HI32: + return VE::fixup_ve_gotoff_hi32; + case VK_VE_GOTOFF_LO32: + return VE::fixup_ve_gotoff_lo32; + case VK_VE_PLT_HI32: + return VE::fixup_ve_plt_hi32; + case VK_VE_PLT_LO32: + return VE::fixup_ve_plt_lo32; + case VK_VE_TLS_GD_HI32: + return VE::fixup_ve_tls_gd_hi32; + case VK_VE_TLS_GD_LO32: + return VE::fixup_ve_tls_gd_lo32; + case VK_VE_TPOFF_HI32: + return VE::fixup_ve_tpoff_hi32; + case VK_VE_TPOFF_LO32: + return VE::fixup_ve_tpoff_lo32; + } +} + +bool VEMCExpr::evaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout, + const MCFixup *Fixup) const { + return getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup); +} + +static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) { + switch (Expr->getKind()) { + case MCExpr::Target: + llvm_unreachable("Can't handle nested target expr!"); + break; + + case MCExpr::Constant: + break; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast(Expr); + fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm); + fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm); + break; + } + + case MCExpr::SymbolRef: { + const MCSymbolRefExpr &SymRef = *cast(Expr); + cast(SymRef.getSymbol()).setType(ELF::STT_TLS); + break; + } + + case MCExpr::Unary: + fixELFSymbolsInTLSFixupsImpl(cast(Expr)->getSubExpr(), Asm); + break; + } +} + +void VEMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { + fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm); +} + +void VEMCExpr::visitUsedExpr(MCStreamer &Streamer) 
const { + Streamer.visitUsedExpr(*getSubExpr()); +} diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h b/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.h @@ -0,0 +1,53 @@ +//===-- VEMCTargetDesc.h - VE Target Descriptions ---------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides VE specific target descriptions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_MCTARGETDESC_VEMCTARGETDESC_H +#define LLVM_LIB_TARGET_VE_MCTARGETDESC_VEMCTARGETDESC_H + +#include "llvm/Support/DataTypes.h" + +#include + +namespace llvm { +class MCAsmBackend; +class MCCodeEmitter; +class MCContext; +class MCInstrInfo; +class MCObjectWriter; +class MCRegisterInfo; +class MCSubtargetInfo; +class MCTargetOptions; +class Target; +class Triple; +class StringRef; +class raw_pwrite_stream; +class raw_ostream; + +Target &getTheVETarget(); + +} // namespace llvm + +// Defines symbolic names for VE registers. This defines a mapping from +// register name to register number. +// +#define GET_REGINFO_ENUM +#include "VEGenRegisterInfo.inc" + +// Defines symbolic names for the VE instructions. +// +#define GET_INSTRINFO_ENUM +#include "VEGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_ENUM +#include "VEGenSubtargetInfo.inc" + +#endif diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.cpp b/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VEMCTargetDesc.cpp @@ -0,0 +1,106 @@ +//===-- VEMCTargetDesc.cpp - VE Target Descriptions -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides VE specific target descriptions. 
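As a quick illustration of how the VEMCExpr helpers above fit together — parseVariantKind maps an assembly modifier name to a VariantKind, printVariantKindSuffix prints it back, and getFixupKind selects the matching relocation fixup — here is a minimal round-trip sketch that uses only the API declared in this patch (illustrative only, not part of the diff):

  // Round-trip one modifier through the VEMCExpr helpers defined above.
  #include "VEMCExpr.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void demoVariantKindRoundTrip() {
    // "tls_gd_hi" is one of the names handled by parseVariantKind.
    VEMCExpr::VariantKind Kind = VEMCExpr::parseVariantKind("tls_gd_hi");
    // Kind == VEMCExpr::VK_VE_TLS_GD_HI32; printed back as an "@tls_gd_hi" suffix.
    VEMCExpr::printVariantKindSuffix(outs(), Kind);
    // The relocation fixup the assembler backend will use for it.
    VE::Fixups Fixup = VEMCExpr::getFixupKind(Kind); // VE::fixup_ve_tls_gd_hi32
    (void)Fixup;
  }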
+// +//===----------------------------------------------------------------------===// + +#include "VEMCTargetDesc.h" +#include "InstPrinter/VEInstPrinter.h" +#include "VEMCAsmInfo.h" +#include "VETargetStreamer.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define GET_INSTRINFO_MC_DESC +#include "VEGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_MC_DESC +#include "VEGenSubtargetInfo.inc" + +#define GET_REGINFO_MC_DESC +#include "VEGenRegisterInfo.inc" + +static MCAsmInfo *createVEMCAsmInfo(const MCRegisterInfo &MRI, const Triple &TT, + const MCTargetOptions &Options) { + MCAsmInfo *MAI = new VEELFMCAsmInfo(TT); + unsigned Reg = MRI.getDwarfRegNum(VE::SX11, true); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0); + MAI->addInitialFrameState(Inst); + return MAI; +} + +static MCInstrInfo *createVEMCInstrInfo() { + MCInstrInfo *X = new MCInstrInfo(); + InitVEMCInstrInfo(X); + return X; +} + +static MCRegisterInfo *createVEMCRegisterInfo(const Triple &TT) { + MCRegisterInfo *X = new MCRegisterInfo(); + InitVEMCRegisterInfo(X, VE::SX10); + return X; +} + +static MCSubtargetInfo *createVEMCSubtargetInfo(const Triple &TT, StringRef CPU, + StringRef FS) { + if (CPU.empty()) + CPU = "ve"; + return createVEMCSubtargetInfoImpl(TT, CPU, FS); +} + +static MCTargetStreamer * +createObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) { + return new VETargetELFStreamer(S); +} + +static MCTargetStreamer *createTargetAsmStreamer(MCStreamer &S, + formatted_raw_ostream &OS, + MCInstPrinter *InstPrint, + bool isVerboseAsm) { + return new VETargetAsmStreamer(S, OS); +} + +static MCInstPrinter *createVEMCInstPrinter(const Triple &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI) { + return new VEInstPrinter(MAI, MII, MRI); +} + +extern "C" void LLVMInitializeVETargetMC() { + // Register the MC asm info. + RegisterMCAsmInfoFn X(getTheVETarget(), createVEMCAsmInfo); + + for (Target *T : {&getTheVETarget()}) { + // Register the MC instruction info. + TargetRegistry::RegisterMCInstrInfo(*T, createVEMCInstrInfo); + + // Register the MC register info. + TargetRegistry::RegisterMCRegInfo(*T, createVEMCRegisterInfo); + + // Register the MC subtarget info. + TargetRegistry::RegisterMCSubtargetInfo(*T, createVEMCSubtargetInfo); + + // Register the object target streamer. + TargetRegistry::RegisterObjectTargetStreamer(*T, + createObjectTargetStreamer); + + // Register the asm streamer. + TargetRegistry::RegisterAsmTargetStreamer(*T, createTargetAsmStreamer); + + // Register the MCInstPrinter + TargetRegistry::RegisterMCInstPrinter(*T, createVEMCInstPrinter); + } +} diff --git a/llvm/lib/Target/VE/MCTargetDesc/VETargetStreamer.h b/llvm/lib/Target/VE/MCTargetDesc/VETargetStreamer.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VETargetStreamer.h @@ -0,0 +1,47 @@ +//===-- VETargetStreamer.h - VE Target Streamer ----------------*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VETARGETSTREAMER_H +#define LLVM_LIB_TARGET_VE_VETARGETSTREAMER_H + +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/MC/MCStreamer.h" + +namespace llvm { +class VETargetStreamer : public MCTargetStreamer { + virtual void anchor(); + +public: + VETargetStreamer(MCStreamer &S); + /// Emit ".register <reg>, #ignore". + virtual void emitVERegisterIgnore(unsigned reg) = 0; + /// Emit ".register <reg>, #scratch". + virtual void emitVERegisterScratch(unsigned reg) = 0; +}; + +// This part is for ascii assembly output +class VETargetAsmStreamer : public VETargetStreamer { + formatted_raw_ostream &OS; + +public: + VETargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS); + void emitVERegisterIgnore(unsigned reg) override; + void emitVERegisterScratch(unsigned reg) override; +}; + +// This part is for ELF object output +class VETargetELFStreamer : public VETargetStreamer { +public: + VETargetELFStreamer(MCStreamer &S); + MCELFStreamer &getStreamer(); + void emitVERegisterIgnore(unsigned reg) override {} + void emitVERegisterScratch(unsigned reg) override {} +}; +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/MCTargetDesc/VETargetStreamer.cpp b/llvm/lib/Target/VE/MCTargetDesc/VETargetStreamer.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/MCTargetDesc/VETargetStreamer.cpp @@ -0,0 +1,44 @@ +//===-- VETargetStreamer.cpp - VE Target Streamer Methods -----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides VE specific target streamer methods.
+// +//===----------------------------------------------------------------------===// + +#include "VETargetStreamer.h" +#include "InstPrinter/VEInstPrinter.h" +#include "llvm/Support/FormattedStream.h" + +using namespace llvm; + +// pin vtable to this file +VETargetStreamer::VETargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {} + +void VETargetStreamer::anchor() {} + +VETargetAsmStreamer::VETargetAsmStreamer(MCStreamer &S, + formatted_raw_ostream &OS) + : VETargetStreamer(S), OS(OS) {} + +void VETargetAsmStreamer::emitVERegisterIgnore(unsigned reg) { + OS << "\t.register " + << "%" << StringRef(VEInstPrinter::getRegisterName(reg)).lower() + << ", #ignore\n"; +} + +void VETargetAsmStreamer::emitVERegisterScratch(unsigned reg) { + OS << "\t.register " + << "%" << StringRef(VEInstPrinter::getRegisterName(reg)).lower() + << ", #scratch\n"; +} + +VETargetELFStreamer::VETargetELFStreamer(MCStreamer &S) : VETargetStreamer(S) {} + +MCELFStreamer &VETargetELFStreamer::getStreamer() { + return static_cast(Streamer); +} diff --git a/llvm/lib/Target/VE/TargetInfo/CMakeLists.txt b/llvm/lib/Target/VE/TargetInfo/CMakeLists.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/TargetInfo/CMakeLists.txt @@ -0,0 +1,3 @@ +add_llvm_component_library(LLVMVEInfo + VETargetInfo.cpp + ) diff --git a/llvm/lib/Target/VE/TargetInfo/LLVMBuild.txt b/llvm/lib/Target/VE/TargetInfo/LLVMBuild.txt new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/TargetInfo/LLVMBuild.txt @@ -0,0 +1,22 @@ +;===- ./lib/Target/VE/TargetInfo/LLVMBuild.txt -----------------*- Conf -*--===; +; +; Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +; See https://llvm.org/LICENSE.txt for license information. +; SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = VEInfo +parent = VE +required_libraries = Support +add_to_library_groups = VE diff --git a/llvm/lib/Target/VE/TargetInfo/VETargetInfo.cpp b/llvm/lib/Target/VE/TargetInfo/VETargetInfo.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/TargetInfo/VETargetInfo.cpp @@ -0,0 +1,23 @@ +//===-- VETargetInfo.cpp - VE Target Implementation -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "VE.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +Target &llvm::getTheVETarget() { + static Target TheVETarget; + return TheVETarget; +} + +extern "C" void LLVMInitializeVETargetInfo() { + RegisterTarget X(getTheVETarget(), "ve", + "VE", "VE"); +} diff --git a/llvm/lib/Target/VE/VE.h b/llvm/lib/Target/VE/VE.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VE.h @@ -0,0 +1,129 @@ +//===-- VE.h - Top-level interface for VE representation --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the entry points for global functions defined in the LLVM +// VE back-end. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VE_H +#define LLVM_LIB_TARGET_VE_VE_H + +#include "MCTargetDesc/VEMCTargetDesc.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { +class FunctionPass; +class VETargetMachine; +class formatted_raw_ostream; +class AsmPrinter; +class MCInst; +class MachineInstr; + +FunctionPass *createVEISelDag(VETargetMachine &TM); +FunctionPass *createVEPromoteToI1Pass(); +FunctionPass *createLVLGenPass(); + +void LowerVEMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI, + AsmPrinter &AP); +} // namespace llvm + +namespace llvm { +// Enums corresponding to VE condition codes, both icc's and fcc's. These +// values must be kept in sync with the ones in the .td file. +namespace VECC { +enum CondCodes { + // Integer comparison + CC_IG = 0, // Greater + CC_IL = 1, // Less + CC_INE = 2, // Not Equal + CC_IEQ = 3, // Equal + CC_IGE = 4, // Greater or Equal + CC_ILE = 5, // Less or Equal + + // Floating point comparison + CC_AF = 0 + 6, // Never + CC_G = 1 + 6, // Greater + CC_L = 2 + 6, // Less + CC_NE = 3 + 6, // Not Equal + CC_EQ = 4 + 6, // Equal + CC_GE = 5 + 6, // Greater or Equal + CC_LE = 6 + 6, // Less or Equal + CC_NUM = 7 + 6, // Number + CC_NAN = 8 + 6, // NaN + CC_GNAN = 9 + 6, // Greater or NaN + CC_LNAN = 10 + 6, // Less or NaN + CC_NENAN = 11 + 6, // Not Equal or NaN + CC_EQNAN = 12 + 6, // Equal or NaN + CC_GENAN = 13 + 6, // Greater or Equal or NaN + CC_LENAN = 14 + 6, // Less or Equal or NaN + CC_AT = 15 + 6, // Always +}; +} + +inline static const char *VECondCodeToString(VECC::CondCodes CC) { + switch (CC) { + case VECC::CC_IG: + return "gt"; + case VECC::CC_IL: + return "lt"; + case VECC::CC_INE: + return "ne"; + case VECC::CC_IEQ: + return "eq"; + case VECC::CC_IGE: + return "ge"; + case VECC::CC_ILE: + return "le"; + case VECC::CC_AF: + return "af"; + case VECC::CC_G: + return "gt"; + case VECC::CC_L: + return "lt"; + case VECC::CC_NE: + return "ne"; + case VECC::CC_EQ: + return "eq"; + case VECC::CC_GE: + return "ge"; + case VECC::CC_LE: + return "le"; + case VECC::CC_NUM: + return "num"; + case VECC::CC_NAN: + return "nan"; + case VECC::CC_GNAN: + return "gtnan"; + case VECC::CC_LNAN: + return "ltnan"; + case VECC::CC_NENAN: + return "nenan"; + case VECC::CC_EQNAN: + return "eqnan"; + case VECC::CC_GENAN: + return "genan"; + case VECC::CC_LENAN: + return "lenan"; + case VECC::CC_AT: + return "at"; + } + llvm_unreachable("Invalid cond code"); +} + +inline static unsigned HI32(int64_t imm) { + return (unsigned)((imm >> 32) & 0xFFFFFFFF); +} + +inline static unsigned LO32(int64_t imm) { + return (unsigned)(imm & 0xFFFFFFFF); +} + +} // namespace llvm +#endif diff --git a/llvm/lib/Target/VE/VE.td b/llvm/lib/Target/VE/VE.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VE.td @@ -0,0 +1,69 @@ +//===-- VE.td - Describe the VE Target Machine -------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Target-independent interfaces which we are implementing +//===----------------------------------------------------------------------===// + +include "llvm/Target/Target.td" + +//===----------------------------------------------------------------------===// +// VE Subtarget features. +// + +def FeatureVectorize + : SubtargetFeature<"vec", "Vectorize", "true", + "Try to vectorize (experimental)">; + + +//===----------------------------------------------------------------------===// +// Register File, Calling Conv, Instruction Descriptions +//===----------------------------------------------------------------------===// + +include "VERegisterInfo.td" +include "VECallingConv.td" +include "VESchedule.td" +include "VEInstrInfo.td" + +def VEInstrInfo : InstrInfo; + +def VEAsmParser : AsmParser { + bit ShouldEmitMatchRegisterName = 0; +} + +//===----------------------------------------------------------------------===// +// VE processors supported. +//===----------------------------------------------------------------------===// + +class Proc<string Name, list<SubtargetFeature> Features> + : Processor<Name, NoItineraries, Features>; + +def : Proc<"ve", []>; +// def : Processor<"ve", VEItinerary, []>; +// def : ProcessorModel<"ve", VESchedModel, []>; + +//===----------------------------------------------------------------------===// +// Declare the target which we are implementing +//===----------------------------------------------------------------------===// + +def VEAsmWriter : AsmWriter { + string AsmWriterClassName = "InstPrinter"; + int PassSubtarget = 1; + int Variant = 0; +} + +def VE : Target { + // Pull in Instruction Info: + let InstructionSet = VEInstrInfo; + let AssemblyParsers = [VEAsmParser]; + let AssemblyWriters = [VEAsmWriter]; + let AllowRegisterRenaming = 1; +} diff --git a/llvm/lib/Target/VE/VEAsmPrinter.cpp b/llvm/lib/Target/VE/VEAsmPrinter.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEAsmPrinter.cpp @@ -0,0 +1,550 @@ +//===-- VEAsmPrinter.cpp - VE LLVM assembly writer ------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains a printer that converts from our internal representation +// of machine-dependent LLVM code to GAS-format VE assembly language.
+// +//===----------------------------------------------------------------------===// + +#include "InstPrinter/VEInstPrinter.h" +#include "MCTargetDesc/VEMCExpr.h" +#include "MCTargetDesc/VETargetStreamer.h" +#include "VE.h" +#include "VEInstrInfo.h" +#include "VETargetMachine.h" +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineModuleInfoImpls.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstBuilder.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +#define DEBUG_TYPE "asm-printer" + +namespace { +class VEAsmPrinter : public AsmPrinter { + VETargetStreamer &getTargetStreamer() { + return static_cast(*OutStreamer->getTargetStreamer()); + } + +public: + explicit VEAsmPrinter(TargetMachine &TM, std::unique_ptr Streamer) + : AsmPrinter(TM, std::move(Streamer)) {} + + StringRef getPassName() const override { return "VE Assembly Printer"; } + + void printOperand(const MachineInstr *MI, int opNum, raw_ostream &OS); + void printMemASXOperand(const MachineInstr *MI, int opNum, raw_ostream &OS, + const char *Modifier = nullptr); + void printMemASOperand(const MachineInstr *MI, int opNum, raw_ostream &OS, + const char *Modifier = nullptr); + + void EmitFunctionBodyStart() override; + void EmitInstruction(const MachineInstr *MI) override; + + static const char *getRegisterName(unsigned RegNo) { + return VEInstPrinter::getRegisterName(RegNo); + } + + bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) override; + bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) override; + + void LowerGETGOTAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI); + void LowerGETFunPLTAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI); + void LowerGETTLSAddrAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI); + void LowerEH_SJLJ_SETJMPAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI); + void LowerEH_SJLJ_LONGJMPAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI); +}; +} // end of anonymous namespace + +static MCOperand createVEMCOperand(VEMCExpr::VariantKind Kind, MCSymbol *Sym, + MCContext &OutContext) { + const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::create(Sym, OutContext); + const VEMCExpr *expr = VEMCExpr::create(Kind, MCSym, OutContext); + return MCOperand::createExpr(expr); +} + +static MCOperand createGOTRelExprOp(VEMCExpr::VariantKind Kind, + MCSymbol *GOTLabel, MCContext &OutContext) { + const MCSymbolRefExpr *GOT = MCSymbolRefExpr::create(GOTLabel, OutContext); + const VEMCExpr *expr = VEMCExpr::create(Kind, GOT, OutContext); + return MCOperand::createExpr(expr); +} + +static void EmitSIC(MCStreamer &OutStreamer, MCOperand &RD, + const MCSubtargetInfo &STI) { + MCInst SICInst; + SICInst.setOpcode(VE::SIC); + SICInst.addOperand(RD); + OutStreamer.EmitInstruction(SICInst, STI); +} + +static void EmitBSIC(MCStreamer &OutStreamer, MCOperand &R1, MCOperand &R2, + const MCSubtargetInfo &STI) { + MCInst BSICInst; + BSICInst.setOpcode(VE::BSIC); + BSICInst.addOperand(R1); + BSICInst.addOperand(R2); + OutStreamer.EmitInstruction(BSICInst, STI); +} 
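The helper emitters that follow (EmitLEAzzi, EmitANDrm0, EmitLEASLzzi/EmitLEASLrri) implement the three-instruction idiom this printer uses to materialize a 64-bit value or address: lea sign-extends the low 32 bits, "and %r, %r, (32)0" clears the upper half, and lea.sl adds the high 32 bits shifted left by 32. A small standalone sketch of that arithmetic, assuming those instruction semantics as described by the assembly comments in this file (illustrative only, not part of the patch):

  #include <cassert>
  #include <cstdint>

  // Reassemble a 64-bit immediate the way the emitted lea/and/lea.sl sequence does.
  static uint64_t materializeImm64(uint64_t Imm) {
    uint64_t R = (uint64_t)(int64_t)(int32_t)(uint32_t)Imm; // lea %r, LO32(Imm)  (sign-extends)
    R &= 0xFFFFFFFFu;                                       // and %r, %r, (32)0  (keep low 32 bits)
    R += (Imm >> 32) << 32;                                 // lea.sl %r, HI32(Imm)(, %r)
    return R;
  }

  int main() {
    assert(materializeImm64(0x123456789ABCDEF0ull) == 0x123456789ABCDEF0ull);
    assert(materializeImm64(0xFFFFFFFF80000000ull) == 0xFFFFFFFF80000000ull);
    return 0;
  }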
+ +static void EmitLEAzzi(MCStreamer &OutStreamer, MCOperand &Imm, MCOperand &RD, + const MCSubtargetInfo &STI) { + MCInst LEAInst; + LEAInst.setOpcode(VE::LEAzzi); + LEAInst.addOperand(RD); + LEAInst.addOperand(Imm); + OutStreamer.EmitInstruction(LEAInst, STI); +} + +static void EmitLEASLzzi(MCStreamer &OutStreamer, MCOperand &Imm, MCOperand &RD, + const MCSubtargetInfo &STI) { + MCInst LEASLInst; + LEASLInst.setOpcode(VE::LEASLzzi); + LEASLInst.addOperand(RD); + LEASLInst.addOperand(Imm); + OutStreamer.EmitInstruction(LEASLInst, STI); +} + +static void EmitLEAzii(MCStreamer &OutStreamer, MCOperand &RS1, MCOperand &Imm, + MCOperand &RD, const MCSubtargetInfo &STI) { + MCInst LEAInst; + LEAInst.setOpcode(VE::LEAzii); + LEAInst.addOperand(RD); + LEAInst.addOperand(RS1); + LEAInst.addOperand(Imm); + OutStreamer.EmitInstruction(LEAInst, STI); +} + +static void EmitLEASLrri(MCStreamer &OutStreamer, MCOperand &RS1, + MCOperand &RS2, MCOperand &Imm, MCOperand &RD, + const MCSubtargetInfo &STI) { + MCInst LEASLInst; + LEASLInst.setOpcode(VE::LEASLrri); + LEASLInst.addOperand(RS1); + LEASLInst.addOperand(RS2); + LEASLInst.addOperand(RD); + LEASLInst.addOperand(Imm); + OutStreamer.EmitInstruction(LEASLInst, STI); +} + +static void EmitBinary(MCStreamer &OutStreamer, unsigned Opcode, MCOperand &RS1, + MCOperand &Src2, MCOperand &RD, + const MCSubtargetInfo &STI) { + MCInst Inst; + Inst.setOpcode(Opcode); + Inst.addOperand(RD); + Inst.addOperand(RS1); + Inst.addOperand(Src2); + OutStreamer.EmitInstruction(Inst, STI); +} + +static void EmitANDrm0(MCStreamer &OutStreamer, MCOperand &RS1, MCOperand &Imm, + MCOperand &RD, const MCSubtargetInfo &STI) { + EmitBinary(OutStreamer, VE::ANDrm0, RS1, Imm, RD, STI); +} + +static void EmitHiLo(MCStreamer &OutStreamer, MCSymbol *GOTSym, + VEMCExpr::VariantKind HiKind, VEMCExpr::VariantKind LoKind, + MCOperand &RD, MCContext &OutContext, + const MCSubtargetInfo &STI) { + + MCOperand hi = createVEMCOperand(HiKind, GOTSym, OutContext); + MCOperand lo = createVEMCOperand(LoKind, GOTSym, OutContext); + MCOperand ci32 = MCOperand::createImm(32); + EmitLEAzzi(OutStreamer, lo, RD, STI); + EmitANDrm0(OutStreamer, RD, ci32, RD, STI); + EmitLEASLzzi(OutStreamer, hi, RD, STI); +} + +void VEAsmPrinter::LowerGETGOTAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI) { + MCSymbol *GOTLabel = + OutContext.getOrCreateSymbol(Twine("_GLOBAL_OFFSET_TABLE_")); + + const MachineOperand &MO = MI->getOperand(0); + MCOperand MCRegOP = MCOperand::createReg(MO.getReg()); + + if (!isPositionIndependent()) { + // Just load the address of GOT to MCRegOP. 
+ switch (TM.getCodeModel()) { + default: + llvm_unreachable("Unsupported absolute code model"); + case CodeModel::Small: + case CodeModel::Medium: + case CodeModel::Large: + EmitHiLo(*OutStreamer, GOTLabel, VEMCExpr::VK_VE_HI32, + VEMCExpr::VK_VE_LO32, MCRegOP, OutContext, STI); + break; + } + return; + } + + MCOperand RegGOT = MCOperand::createReg(VE::SX15); // GOT + MCOperand RegPLT = MCOperand::createReg(VE::SX16); // PLT + + // lea %got, _GLOBAL_OFFSET_TABLE_@PC_LO(-24) + // and %got, %got, (32)0 + // sic %plt + // lea.sl %got, _GLOBAL_OFFSET_TABLE_@PC_HI(%got, %plt) + MCOperand cim24 = MCOperand::createImm(-24); + MCOperand loImm = + createGOTRelExprOp(VEMCExpr::VK_VE_PC_LO32, GOTLabel, OutContext); + EmitLEAzii(*OutStreamer, cim24, loImm, MCRegOP, STI); + MCOperand ci32 = MCOperand::createImm(32); + EmitANDrm0(*OutStreamer, MCRegOP, ci32, MCRegOP, STI); + EmitSIC(*OutStreamer, RegPLT, STI); + MCOperand hiImm = + createGOTRelExprOp(VEMCExpr::VK_VE_PC_HI32, GOTLabel, OutContext); + EmitLEASLrri(*OutStreamer, RegGOT, RegPLT, hiImm, MCRegOP, STI); +} + +void VEAsmPrinter::LowerGETFunPLTAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI) { + const MachineOperand &MO = MI->getOperand(0); + MCOperand MCRegOP = MCOperand::createReg(MO.getReg()); + const MachineOperand &Addr = MI->getOperand(1); + MCSymbol *AddrSym = nullptr; + + switch (Addr.getType()) { + default: + llvm_unreachable(""); + return; + case MachineOperand::MO_MachineBasicBlock: + report_fatal_error("MBB is not supported yet"); + return; + case MachineOperand::MO_ConstantPoolIndex: + report_fatal_error("ConstantPool is not supported yet"); + return; + case MachineOperand::MO_ExternalSymbol: + AddrSym = GetExternalSymbolSymbol(Addr.getSymbolName()); + break; + case MachineOperand::MO_GlobalAddress: + AddrSym = getSymbol(Addr.getGlobal()); + break; + } + + if (!isPositionIndependent()) { + llvm_unreachable("Unsupported use of %plt in non-PIC code"); + return; + } + + MCOperand RegPLT = MCOperand::createReg(VE::SX16); // PLT + + // lea %dst, %plt_lo(func)(-24) + // and %dst, %dst, (32)0 + // sic %plt ; FIXME: is it safe to use %plt here?
+ // lea.sl %dst, %plt_hi(func)(%dst, %plt) + MCOperand cim24 = MCOperand::createImm(-24); + MCOperand loImm = + createGOTRelExprOp(VEMCExpr::VK_VE_PLT_LO32, AddrSym, OutContext); + EmitLEAzii(*OutStreamer, cim24, loImm, MCRegOP, STI); + MCOperand ci32 = MCOperand::createImm(32); + EmitANDrm0(*OutStreamer, MCRegOP, ci32, MCRegOP, STI); + EmitSIC(*OutStreamer, RegPLT, STI); + MCOperand hiImm = + createGOTRelExprOp(VEMCExpr::VK_VE_PLT_HI32, AddrSym, OutContext); + EmitLEASLrri(*OutStreamer, MCRegOP, RegPLT, hiImm, MCRegOP, STI); +} + +void VEAsmPrinter::LowerGETTLSAddrAndEmitMCInsts(const MachineInstr *MI, + const MCSubtargetInfo &STI) { + const MachineOperand &Addr = MI->getOperand(0); + MCSymbol *AddrSym = nullptr; + + switch (Addr.getType()) { + default: + llvm_unreachable(""); + return; + case MachineOperand::MO_MachineBasicBlock: + report_fatal_error("MBB is not supported yet"); + return; + case MachineOperand::MO_ConstantPoolIndex: + report_fatal_error("ConstantPool is not supported yet"); + return; + case MachineOperand::MO_ExternalSymbol: + AddrSym = GetExternalSymbolSymbol(Addr.getSymbolName()); + break; + case MachineOperand::MO_GlobalAddress: + AddrSym = getSymbol(Addr.getGlobal()); + break; + } + + MCOperand RegLR = MCOperand::createReg(VE::SX10); // LR + MCOperand RegS0 = MCOperand::createReg(VE::SX0); // S0 + MCOperand RegS12 = MCOperand::createReg(VE::SX12); // S12 + MCSymbol *GetTLSLabel = OutContext.getOrCreateSymbol(Twine("__tls_get_addr")); + + // lea %s0, sym@tls_gd_lo(-24) + // and %s0, %s0, (32)0 + // sic %lr + // lea.sl %s0, sym@tls_gd_hi(%s0, %lr) + // lea %s12, __tls_get_addr@plt_lo(8) + // and %s12, %s12, (32)0 + // lea.sl %s12, __tls_get_addr@plt_hi(%s12, %lr) + // bsic %lr, (, %s12) + MCOperand cim24 = MCOperand::createImm(-24); + MCOperand loImm = + createGOTRelExprOp(VEMCExpr::VK_VE_TLS_GD_LO32, AddrSym, OutContext); + EmitLEAzii(*OutStreamer, cim24, loImm, RegS0, STI); + MCOperand ci32 = MCOperand::createImm(32); + EmitANDrm0(*OutStreamer, RegS0, ci32, RegS0, STI); + EmitSIC(*OutStreamer, RegLR, STI); + MCOperand hiImm = + createGOTRelExprOp(VEMCExpr::VK_VE_TLS_GD_HI32, AddrSym, OutContext); + EmitLEASLrri(*OutStreamer, RegS0, RegLR, hiImm, RegS0, STI); + MCOperand ci8 = MCOperand::createImm(8); + MCOperand loImm2 = + createGOTRelExprOp(VEMCExpr::VK_VE_PLT_LO32, GetTLSLabel, OutContext); + EmitLEAzii(*OutStreamer, ci8, loImm2, RegS12, STI); + EmitANDrm0(*OutStreamer, RegS12, ci32, RegS12, STI); + MCOperand hiImm2 = + createGOTRelExprOp(VEMCExpr::VK_VE_PLT_HI32, GetTLSLabel, OutContext); + EmitLEASLrri(*OutStreamer, RegS12, RegLR, hiImm2, RegS12, STI); + EmitBSIC(*OutStreamer, RegLR, RegS12, STI); +} + +void VEAsmPrinter::LowerEH_SJLJ_SETJMPAndEmitMCInsts( + const MachineInstr *MI, const MCSubtargetInfo &STI) { + // sic $dest + // lea $dest, 32($dest) // $dest points 0f + // st $dest, 8(,$src) + // lea $dest, 0 + // br.l 16 // br 1f + // 0: + // lea $dest, 1 + // 1: + + unsigned DestReg = MI->getOperand(0).getReg(); + unsigned SrcReg = MI->getOperand(1).getReg(); + + EmitToStreamer(*OutStreamer, MCInstBuilder(VE::SIC).addReg(DestReg)); + + EmitToStreamer( + *OutStreamer, + MCInstBuilder(VE::LEArzi).addReg(DestReg).addReg(DestReg).addImm(32)); + + EmitToStreamer( + *OutStreamer, + MCInstBuilder(VE::STSri).addReg(SrcReg).addImm(8).addReg(DestReg)); + + EmitToStreamer(*OutStreamer, + MCInstBuilder(VE::LEAzzi).addReg(DestReg).addImm(0)); + + EmitToStreamer(*OutStreamer, MCInstBuilder(VE::BCRLa).addImm(16)); + + EmitToStreamer(*OutStreamer,
MCInstBuilder(VE::LEAzzi).addReg(DestReg).addImm(1)); +} + +void VEAsmPrinter::LowerEH_SJLJ_LONGJMPAndEmitMCInsts( + const MachineInstr *MI, const MCSubtargetInfo &STI) { + // ld %s9, (, $src) // s9 = fp + // ld %s10, 8(, $src) // s10 = lr + // ld %s11, 16(, $src) // s11 = sp + // b.l (%s10) + + unsigned SrcReg = MI->getOperand(0).getReg(); + + EmitToStreamer( + *OutStreamer, + MCInstBuilder(VE::LDSri).addReg(VE::SX9).addReg(SrcReg).addImm(0)); + + EmitToStreamer( + *OutStreamer, + MCInstBuilder(VE::LDSri).addReg(VE::SX10).addReg(SrcReg).addImm(8)); + + EmitToStreamer( + *OutStreamer, + MCInstBuilder(VE::LDSri).addReg(VE::SX11).addReg(SrcReg).addImm(16)); + + EmitToStreamer(*OutStreamer, + MCInstBuilder(VE::BAri).addReg(VE::SX10).addImm(0)); + return; +} + +void VEAsmPrinter::EmitInstruction(const MachineInstr *MI) { + + switch (MI->getOpcode()) { + default: + break; + case TargetOpcode::DBG_VALUE: + // FIXME: Debug Value. + return; + case VE::GETGOT: + LowerGETGOTAndEmitMCInsts(MI, getSubtargetInfo()); + return; + case VE::GETFUNPLT: + LowerGETFunPLTAndEmitMCInsts(MI, getSubtargetInfo()); + return; + case VE::GETTLSADDR: + LowerGETTLSAddrAndEmitMCInsts(MI, getSubtargetInfo()); + return; + // Emit nothing here but a comment if we can. + case VE::MEMBARRIER: + OutStreamer->emitRawComment("MEMBARRIER"); + return; + case VE::EH_SjLj_SetJmp: + LowerEH_SJLJ_SETJMPAndEmitMCInsts(MI, getSubtargetInfo()); + return; + case VE::EH_SjLj_LongJmp: + LowerEH_SJLJ_LONGJMPAndEmitMCInsts(MI, getSubtargetInfo()); + return; + } + MachineBasicBlock::const_instr_iterator I = MI->getIterator(); + MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); + do { + MCInst TmpInst; + LowerVEMachineInstrToMCInst(&*I, TmpInst, *this); + EmitToStreamer(*OutStreamer, TmpInst); + } while ((++I != E) && I->isInsideBundle()); // Delay slot check. +} + +void VEAsmPrinter::EmitFunctionBodyStart() {} + +void VEAsmPrinter::printOperand(const MachineInstr *MI, int opNum, + raw_ostream &O) { + const DataLayout &DL = getDataLayout(); + const MachineOperand &MO = MI->getOperand(opNum); + VEMCExpr::VariantKind TF = (VEMCExpr::VariantKind)MO.getTargetFlags(); + + bool CloseParen = VEMCExpr::printVariantKind(O, TF); + + switch (MO.getType()) { + case MachineOperand::MO_Register: + O << "%" << StringRef(getRegisterName(MO.getReg())).lower(); + break; + + case MachineOperand::MO_Immediate: + O << (int)MO.getImm(); + break; + case MachineOperand::MO_MachineBasicBlock: + MO.getMBB()->getSymbol()->print(O, MAI); + return; + case MachineOperand::MO_GlobalAddress: + getSymbol(MO.getGlobal())->print(O, MAI); + break; + case MachineOperand::MO_BlockAddress: + O << GetBlockAddressSymbol(MO.getBlockAddress())->getName(); + break; + case MachineOperand::MO_ExternalSymbol: + O << MO.getSymbolName(); + break; + case MachineOperand::MO_ConstantPoolIndex: + O << DL.getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_" + << MO.getIndex(); + break; + case MachineOperand::MO_Metadata: + MO.getMetadata()->printAsOperand(O, MMI->getModule()); + break; + default: + llvm_unreachable(""); + } + if (CloseParen) + O << ")"; + VEMCExpr::printVariantKindSuffix(O, TF); +} + +void VEAsmPrinter::printMemASXOperand(const MachineInstr *MI, int opNum, + raw_ostream &O, const char *Modifier) { + // If this is an ADD operand, emit it like normal operands. 
+ if (Modifier && !strcmp(Modifier, "arith")) { + printOperand(MI, opNum, O); + O << ", "; + printOperand(MI, opNum + 1, O); + return; + } + + if (MI->getOperand(opNum + 1).isImm() && + MI->getOperand(opNum + 1).getImm() == 0) { + // don't print "+0" + } else { + printOperand(MI, opNum + 1, O); + } + O << "(,"; + printOperand(MI, opNum, O); + O << ")"; +} + +void VEAsmPrinter::printMemASOperand(const MachineInstr *MI, int opNum, + raw_ostream &O, const char *Modifier) { + // If this is an ADD operand, emit it like normal operands. + if (Modifier && !strcmp(Modifier, "arith")) { + printOperand(MI, opNum, O); + O << ", "; + printOperand(MI, opNum + 1, O); + return; + } + + if (MI->getOperand(opNum + 1).isImm() && + MI->getOperand(opNum + 1).getImm() == 0) { + // don't print "+0" + } else { + printOperand(MI, opNum + 1, O); + } + O << "("; + printOperand(MI, opNum, O); + O << ")"; +} + +/// PrintAsmOperand - Print out an operand for an inline asm expression. +/// +bool VEAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) { + if (ExtraCode && ExtraCode[0]) { + if (ExtraCode[1] != 0) + return true; // Unknown modifier. + + switch (ExtraCode[0]) { + default: + // See if this is a generic print operand + return AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O); + case 'f': + case 'r': + break; + } + } + + printOperand(MI, OpNo, O); + + return false; +} + +bool VEAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, + raw_ostream &O) { + if (ExtraCode && ExtraCode[0]) + return true; // Unknown modifier + + O << '['; + printMemASXOperand(MI, OpNo, O); + O << ']'; + + return false; +} + +// Force static initialization. +extern "C" void LLVMInitializeVEAsmPrinter() { + RegisterAsmPrinter X(getTheVETarget()); +} diff --git a/llvm/lib/Target/VE/VECallingConv.td b/llvm/lib/Target/VE/VECallingConv.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VECallingConv.td @@ -0,0 +1,177 @@ +//===-- VECallingConv.td - Calling Conventions VE ----------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This describes the calling conventions for the VE architectures. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Aurora VE +//===----------------------------------------------------------------------===// + +def CC_VE_C_Stack: CallingConv<[ + // F128 are assigned to the stack in 16-byte aligned units + CCIfType<[f128], CCAssignToStackWithShadow<16, 16, [SX7]>>, + + // float --> need special handling like below. + // 0 4 + // +------+------+ + // | empty| float| + // +------+------+ + CCIfType<[f32], CCCustom<"allocateFloat">>, + + // All of the rest are assigned to the stack in 8-byte aligned units. 
+ CCAssignToStack<0, 8> +]>; + +def CC_VE_RegCall : CallingConv<[ + // vector --> generic vector registers + CCIfType<[v2i32, v2i64, v2f32, v2f64, + v4i32, v4i64, v4f32, v4f64, + v8i32, v8i64, v8f32, v8f64, + v16i32, v16i64, v16f32, v16f64, + v32i32, v32i64, v32f32, v32f64, + v64i32, v64i64, v64f32, v64f64, + v128i32, v128i64, v128f32, v128f64, + v256i32, v256f32, v256i64, v256f64, + v512i32, v512f32], + CCAssignToReg<[V0, V1, V2, V3, V4, V5, V6, V7]>>, + + // vector mask --> generic vector mask registers + CCIfType<[v256i1], + CCAssignToReg<[VM1, VM2, VM3, VM4, VM5, VM6, VM7]>>, + + // pair of vector mask --> generic vector mask registers + CCIfType<[v512i1], + CCAssignToRegWithShadow<[VMP1, VMP2, VMP3], + [VM1, VM1, VM3]>>, + + // Alternatively, they are assigned to the stack in 8-byte aligned units. + CCDelegateTo +]>; + +def CC_VE : CallingConv<[ + // All arguments get passed in generic registers if there is space. + + // Promote i1/i8/i16 arguments to i32. + CCIfType<[i1, i8, i16], CCPromoteToType>, + + // bool, char, int, enum, long --> generic integer 32 bit registers + CCIfType<[i32], CCAssignToRegWithShadow< + [SW0, SW1, SW2, SW3, SW4, SW5, SW6, SW7], + [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>, + + // float --> generic floating point 32 bit registers + CCIfType<[f32], CCAssignToRegWithShadow< + [SF0, SF1, SF2, SF3, SF4, SF5, SF6, SF7], + [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>, + + // long long/double --> generic 64 bit registers + CCIfType<[i64, f64], + CCAssignToReg<[SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>, + + // long double --> pair of generic 64 bit registers + // + // NOTE: If Q1 is allocated while SX1 is free, llvm tries to allocate SX1 for + // following operands, this masks SX1 to avoid such behavior. + CCIfType<[f128], + CCAssignToRegWithShadow<[Q0, Q1, Q2, Q3], + [SX0, SX1, SX3, SX5]>>, + + CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo>, + + // Alternatively, they are assigned to the stack in 8-byte aligned units. + CCDelegateTo +]>; + +// All arguments get passed in stack for varargs function or non-prototyped +// function. +def CC_VE2 : CallingConv<[ + // F128 are assigned to the stack in 16-byte aligned units + CCIfType<[f128], CCAssignToStack<16, 16>>, + + // float --> need special handling like below. + // 0 4 + // +------+------+ + // | empty| float| + // +------+------+ + CCIfType<[f32], CCCustom<"allocateFloat">>, + + CCAssignToStack<0, 8> +]>; + +def RetCC_VE_RegCall : CallingConv<[ + // vector --> generic vector registers + CCIfType<[v2i32, v2i64, v2f32, v2f64, + v4i32, v4i64, v4f32, v4f64, + v8i32, v8i64, v8f32, v8f64, + v16i32, v16i64, v16f32, v16f64, + v32i32, v32i64, v32f32, v32f64, + v64i32, v64i64, v64f32, v64f64, + v128i32, v128i64, v128f32, v128f64, + v256i32, v256f32, v256i64, v256f64, + v512i32, v512f32], + CCAssignToReg<[V0, V1, V2, V3, V4, V5, V6, V7]>>, + + // vector mask --> generic vector mask registers + CCIfType<[v256i1], + CCAssignToReg<[VM1, VM2, VM3, VM4, VM5, VM6, VM7]>>, + + // pair of vector mask --> generic vector mask registers + CCIfType<[v512i1], + CCAssignToRegWithShadow<[VMP1, VMP2, VMP3], + [VM1, VM1, VM3]>> +]>; + +def RetCC_VE : CallingConv<[ + // Promote i1/i8/i16 arguments to i32. 
+ CCIfType<[i1, i8, i16], CCPromoteToType>, + + // bool, char, int, enum, long --> generic integer 32 bit registers + CCIfType<[i32], CCAssignToRegWithShadow< + [SW0, SW1, SW2, SW3, SW4, SW5, SW6, SW7], + [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>, + + // float --> generic floating point 32 bit registers + CCIfType<[f32], CCAssignToRegWithShadow< + [SF0, SF1, SF2, SF3, SF4, SF5, SF6, SF7], + [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>, + + // long long/double --> generic 64 bit registers + CCIfType<[i64, f64], + CCAssignToReg<[SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>, + + // long double --> pair of generic 64 bit registers + CCIfType<[f128], + CCAssignToRegWithShadow<[Q0, Q1, Q2, Q3], + [SX0, SX1, SX3, SX5]>>, + + CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo> +]>; + +// Callee-saved registers +def CSR : CalleeSavedRegs<(add (sequence "SX%u", 18, 33))>; +def CSR_RegCall : CalleeSavedRegs<(add (sequence "SX%u", 18, 33), + (sequence "V%u", 18, 33), + (sequence "VM%u", 8, 15))>; +def CSR_NoRegs : CalleeSavedRegs<(add)>; + +// vec_expf destroys s0, s1, s5, s6, s11, s61, s62, s63, v0-6, and vm6 +def CSR_vec_expf : CalleeSavedRegs<(add (sequence "SX%u", 2, 4), + (sequence "SX%u", 7, 10), + (sequence "SX%u", 12, 60), + (sequence "V%u", 7, 63), + (sequence "VM%u", 1, 5), + (sequence "VM%u", 7, 15) + )>; + +// llvm_grow_stack destroys s62 and s63 +def CSR_llvm_grow_stack : CalleeSavedRegs<(add (sequence "SX%u", 0, 61), + (sequence "V%u", 0, 63), + (sequence "VM%u", 1, 15) + )>; diff --git a/llvm/lib/Target/VE/VEFrameLowering.h b/llvm/lib/Target/VE/VEFrameLowering.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEFrameLowering.h @@ -0,0 +1,81 @@ +//===-- VEFrameLowering.h - Define frame lowering for VE --*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VEFRAMELOWERING_H +#define LLVM_LIB_TARGET_VE_VEFRAMELOWERING_H + +#include "VE.h" +#include "llvm/CodeGen/TargetFrameLowering.h" + +namespace llvm { + +class VESubtarget; +class VEFrameLowering : public TargetFrameLowering { +public: + explicit VEFrameLowering(const VESubtarget &ST); + + /// emitProlog/emitEpilog - These methods insert prolog and epilog code into + /// the function. 
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override; + void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override; + void emitPrologueInsns(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, int NumBytes, + bool RequireFPUpdate) const; + void emitEpilogueInsns(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, int NumBytes, + bool RequireFPUpdate) const; + + MachineBasicBlock::iterator + eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const override; + + bool hasReservedCallFrame(const MachineFunction &MF) const override; + bool hasFP(const MachineFunction &MF) const override; + void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, + RegScavenger *RS = nullptr) const override; + + int getFrameIndexReference(const MachineFunction &MF, int FI, + unsigned &FrameReg) const override; + + const SpillSlot * + getCalleeSavedSpillSlots(unsigned &NumEntries) const override { + static const SpillSlot Offsets[] = { + {VE::SX17, 40}, {VE::SX18, 48}, {VE::SX19, 56}, {VE::SX20, 64}, + {VE::SX21, 72}, {VE::SX22, 80}, {VE::SX23, 88}, {VE::SX24, 96}, + {VE::SX25, 104}, {VE::SX26, 112}, {VE::SX27, 120}, {VE::SX28, 128}, + {VE::SX29, 136}, {VE::SX30, 144}, {VE::SX31, 152}, {VE::SX32, 160}, + {VE::SX33, 168}}; + NumEntries = array_lengthof(Offsets); + return Offsets; + } + + /// targetHandlesStackFrameRounding - Returns true if the target is + /// responsible for rounding up the stack frame (probably at emitPrologue + /// time). + bool targetHandlesStackFrameRounding() const override { return true; } + +private: + // Returns true if MF is a leaf procedure. + bool isLeafProc(MachineFunction &MF) const; + + // Emits code for adjusting SP in function prologue/epilogue. + void emitSPAdjustment(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, int NumBytes) const; + + // Emits code for extending SP in function prologue/epilogue. + void emitSPExtend(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, int NumBytes) const; +}; + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/VEFrameLowering.cpp b/llvm/lib/Target/VE/VEFrameLowering.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEFrameLowering.cpp @@ -0,0 +1,380 @@ +//===-- VEFrameLowering.cpp - VE Frame Information ------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the VE implementation of TargetFrameLowering class. 
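The fixed spill slots above line up with the stores emitted by emitPrologueInsns in the .cpp that follows: %fp is saved at offset 0, %lr at 8, %got at 24, %plt at 32, and %s17 through %s33 occupy consecutive 8-byte slots starting at offset 40. A tiny compile-time check of that layout, derived only from the table and prologue code in this patch (illustrative, not part of the diff):

  // Offset of the Idx-th callee-saved register (Idx 0 = %s17 ... 16 = %s33)
  // within the register save area, after %fp(0), %lr(8), %got(24), %plt(32).
  constexpr int calleeSaveOffset(int Idx) { return 40 + 8 * Idx; }

  static_assert(calleeSaveOffset(0) == 40, "matches {VE::SX17, 40}");
  static_assert(calleeSaveOffset(16) == 168, "matches {VE::SX33, 168}");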
+// +//===----------------------------------------------------------------------===// + +#include "VEFrameLowering.h" +#include "VEInstrInfo.h" +#include "VEMachineFunctionInfo.h" +#include "VESubtarget.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Target/TargetOptions.h" + +using namespace llvm; + +static cl::opt + DisableLeafProc("disable-ve-leaf-proc", cl::init(false), + cl::desc("Disable VE leaf procedure optimization."), + cl::Hidden); + +VEFrameLowering::VEFrameLowering(const VESubtarget &ST) + : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, Align(16), 0, + Align(16)) {} + +void VEFrameLowering::emitPrologueInsns(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + int NumBytes, + bool RequireFPUpdate) const { + + DebugLoc dl; + const VEInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + // Insert following codes here as prologue + // + // st %fp, 0(,%sp) + // st %lr, 8(,%sp) + // st %got, 24(,%sp) + // st %plt, 32(,%sp) + // or %fp, 0, %sp + + BuildMI(MBB, MBBI, dl, TII.get(VE::STSri)) + .addReg(VE::SX11) + .addImm(0) + .addReg(VE::SX9); + BuildMI(MBB, MBBI, dl, TII.get(VE::STSri)) + .addReg(VE::SX11) + .addImm(8) + .addReg(VE::SX10); + BuildMI(MBB, MBBI, dl, TII.get(VE::STSri)) + .addReg(VE::SX11) + .addImm(24) + .addReg(VE::SX15); + BuildMI(MBB, MBBI, dl, TII.get(VE::STSri)) + .addReg(VE::SX11) + .addImm(32) + .addReg(VE::SX16); + BuildMI(MBB, MBBI, dl, TII.get(VE::ORri), VE::SX9).addReg(VE::SX11).addImm(0); +} + +void VEFrameLowering::emitEpilogueInsns(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + int NumBytes, + bool RequireFPUpdate) const { + + DebugLoc dl; + const VEInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + // Insert following codes here as epilogue + // + // or %sp, 0, %fp + // ld %got, 32(,%sp) + // ld %plt, 24(,%sp) + // ld %lr, 8(,%sp) + // ld %fp, 0(,%sp) + + BuildMI(MBB, MBBI, dl, TII.get(VE::ORri), VE::SX11).addReg(VE::SX9).addImm(0); + BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX16) + .addReg(VE::SX11) + .addImm(32); + BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX15) + .addReg(VE::SX11) + .addImm(24); + BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX10) + .addReg(VE::SX11) + .addImm(8); + BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX9) + .addReg(VE::SX11) + .addImm(0); +} + +void VEFrameLowering::emitSPAdjustment(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + int NumBytes) const { + DebugLoc dl; + const VEInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + + if (NumBytes >= -64 && NumBytes < 63) { + BuildMI(MBB, MBBI, dl, TII.get(VE::ADXri), VE::SX11) + .addReg(VE::SX11) + .addImm(NumBytes); + return; + } + + // Emit following codes. This clobbers SX13 which we always know is + // available here. 
+ // lea %s13,%lo(NumBytes) + // and %s13,%s13,(32)0 + // lea.sl %sp,%hi(NumBytes)(%sp, %s13) + BuildMI(MBB, MBBI, dl, TII.get(VE::LEAzzi), VE::SX13).addImm(LO32(NumBytes)); + BuildMI(MBB, MBBI, dl, TII.get(VE::ANDrm0), VE::SX13) + .addReg(VE::SX13) + .addImm(32); + BuildMI(MBB, MBBI, dl, TII.get(VE::LEASLrri), VE::SX11) + .addReg(VE::SX11) + .addReg(VE::SX13) + .addImm(HI32(NumBytes)); +} + +void VEFrameLowering::emitSPExtend(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + int NumBytes) const { + DebugLoc dl; + const VEInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + + // Emit following codes. It is not possible to insert multiple + // BasicBlocks in PEI pass, so we emit two pseudo instructions here. + // + // EXTEND_STACK // pseudo instrcution + // EXTEND_STACK_GUARD // pseudo instrcution + // + // EXTEND_STACK pseudo will be converted by ExpandPostRA pass into + // following instructions with multiple basic blocks later. + // + // thisBB: + // brge.l.t %sp, %sl, sinkBB + // syscallBB: + // ld %s61, 0x18(, %tp) // load param area + // or %s62, 0, %s0 // spill the value of %s0 + // lea %s63, 0x13b // syscall # of grow + // shm.l %s63, 0x0(%s61) // store syscall # at addr:0 + // shm.l %sl, 0x8(%s61) // store old limit at addr:8 + // shm.l %sp, 0x10(%s61) // store new limit at addr:16 + // monc // call monitor + // or %s0, 0, %s62 // restore the value of %s0 + // sinkBB: + // + // EXTEND_STACK_GUARD pseudo will be simply eliminated by ExpandPostRA + // pass. This pseudo is required to be at the next of EXTEND_STACK + // pseudo in order to protect iteration loop in ExpandPostRA. + + BuildMI(MBB, MBBI, dl, TII.get(VE::EXTEND_STACK)); + BuildMI(MBB, MBBI, dl, TII.get(VE::EXTEND_STACK_GUARD)); +} + +void VEFrameLowering::emitPrologue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported"); + MachineFrameInfo &MFI = MF.getFrameInfo(); + const VESubtarget &Subtarget = MF.getSubtarget(); + const VEInstrInfo &TII = + *static_cast(Subtarget.getInstrInfo()); + const VERegisterInfo &RegInfo = + *static_cast(Subtarget.getRegisterInfo()); + MachineBasicBlock::iterator MBBI = MBB.begin(); + // Debug location must be unknown since the first debug location is used + // to determine the end of the prologue. + DebugLoc dl; + bool NeedsStackRealignment = RegInfo.needsStackRealignment(MF); + + // FIXME: unfortunately, returning false from canRealignStack + // actually just causes needsStackRealignment to return false, + // rather than reporting an error, as would be sensible. This is + // poor, but fixing that bogosity is going to be a large project. + // For now, just see if it's lied, and report an error here. + if (!NeedsStackRealignment && MFI.getMaxAlignment() > getStackAlignment()) + report_fatal_error("Function \"" + Twine(MF.getName()) + + "\" required " + "stack re-alignment, but LLVM couldn't handle it " + "(probably because it has a dynamic alloca)."); + + // Get the number of bytes to allocate from the FrameInfo + int NumBytes = (int)MFI.getStackSize(); + // The SPARC ABI is a bit odd in that it requires a reserved 92-byte + // (128 in v9) area in the user's stack, starting at %sp. Thus, the + // first part of the stack that can actually be used is located at + // %sp + 92. + // + // We therefore need to add that offset to the total stack size + // after all the stack objects are placed by + // PrologEpilogInserter calculateFrameObjectOffsets. 
However, since the stack + // needs to be aligned *after* the extra size is added, we need to disable + // calculateFrameObjectOffsets's built-in stack alignment, by having + // targetHandlesStackFrameRounding return true. + + // Add the extra call frame stack size, if needed. (This is the same + // code as in PrologEpilogInserter, but also gets disabled by + // targetHandlesStackFrameRounding) + if (MFI.adjustsStack() && hasReservedCallFrame(MF)) + NumBytes += MFI.getMaxCallFrameSize(); + + // Adds the SPARC subtarget-specific spill area to the stack + // size. Also ensures target-required alignment. + NumBytes = Subtarget.getAdjustedFrameSize(NumBytes); + + // Finally, ensure that the size is sufficiently aligned for the + // data on the stack. + if (MFI.getMaxAlignment() > 0) { + NumBytes = alignTo(NumBytes, MFI.getMaxAlignment()); + } + + // Update stack size with corrected value. + MFI.setStackSize(NumBytes); + + // emit Prologue instructions to save %lr + emitPrologueInsns(MF, MBB, MBBI, NumBytes, true); + + // emit stack adjust instructions + emitSPAdjustment(MF, MBB, MBBI, -NumBytes); + + // emit stack extend instructions + emitSPExtend(MF, MBB, MBBI, -NumBytes); + + unsigned regFP = RegInfo.getDwarfRegNum(VE::SX9, true); + + // Emit ".cfi_def_cfa_register 30". + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, regFP)); + BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + + // Emit ".cfi_window_save". + CFIIndex = MF.addFrameInst(MCCFIInstruction::createWindowSave(nullptr)); + BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); +} + +MachineBasicBlock::iterator VEFrameLowering::eliminateCallFramePseudoInstr( + MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const { + if (!hasReservedCallFrame(MF)) { + MachineInstr &MI = *I; + int Size = MI.getOperand(0).getImm(); + if (MI.getOpcode() == VE::ADJCALLSTACKDOWN) + Size = -Size; + + if (Size) + emitSPAdjustment(MF, MBB, I, Size); + } + return MBB.erase(I); +} + +void VEFrameLowering::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); + DebugLoc dl = MBBI->getDebugLoc(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + + int NumBytes = (int)MFI.getStackSize(); + + // emit Epilogue instructions to restore %lr + emitEpilogueInsns(MF, MBB, MBBI, NumBytes, true); +} + +bool VEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { + // Reserve call frame if there are no variable sized objects on the stack. + return !MF.getFrameInfo().hasVarSizedObjects(); +} + +// hasFP - Return true if the specified function should have a dedicated frame +// pointer register. This is true if the function has variable sized allocas or +// if frame pointer elimination is disabled. 
+bool VEFrameLowering::hasFP(const MachineFunction &MF) const { + const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo(); + + const MachineFrameInfo &MFI = MF.getFrameInfo(); + return MF.getTarget().Options.DisableFramePointerElim(MF) || + RegInfo->needsStackRealignment(MF) || MFI.hasVarSizedObjects() || + MFI.isFrameAddressTaken(); +} + +int VEFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI, + unsigned &FrameReg) const { + const VESubtarget &Subtarget = MF.getSubtarget(); + const MachineFrameInfo &MFI = MF.getFrameInfo(); + const VERegisterInfo *RegInfo = Subtarget.getRegisterInfo(); + const VEMachineFunctionInfo *FuncInfo = MF.getInfo(); + bool isFixed = MFI.isFixedObjectIndex(FI); + + // Addressable stack objects are accessed using neg. offsets from + // %fp, or positive offsets from %sp. + bool UseFP; + + // VE uses FP-based references in general, even when "hasFP" is + // false. That function is rather a misnomer, because %fp is + // actually always available, unless isLeafProc. + if (FuncInfo->isLeafProc()) { + // If there's a leaf proc, all offsets need to be %sp-based, + // because we haven't caused %fp to actually point to our frame. + UseFP = false; + } else if (isFixed) { + // Otherwise, argument access should always use %fp. + UseFP = true; + } else if (RegInfo->needsStackRealignment(MF)) { + // If there is dynamic stack realignment, all local object + // references need to be via %sp, to take account of the + // re-alignment. + UseFP = false; + } else { + // Finally, default to using %fp. + UseFP = true; + } + + int64_t FrameOffset = MF.getFrameInfo().getObjectOffset(FI); + + if (UseFP) { + FrameReg = RegInfo->getFrameRegister(MF); + return FrameOffset; + } else { + FrameReg = VE::SX11; // %sp + return FrameOffset + MF.getFrameInfo().getStackSize(); + } +} + +static bool LLVM_ATTRIBUTE_UNUSED +verifyLeafProcRegUse(MachineRegisterInfo *MRI) { + + // If any of parameter registers are used, this is not leaf function. + for (unsigned reg = VE::SX0; reg <= VE::SX7; ++reg) + if (MRI->isPhysRegUsed(reg)) + return false; + + // If any of callee-saved registers are used, this is not leaf function. + for (unsigned reg = VE::SX18; reg <= VE::SX33; ++reg) + if (MRI->isPhysRegUsed(reg)) + return false; + + return true; +} + +bool VEFrameLowering::isLeafProc(MachineFunction &MF) const { + + MachineRegisterInfo &MRI = MF.getRegInfo(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + + return !(MFI.hasCalls() // has calls + || MRI.isPhysRegUsed(VE::SX18) // Too many registers needed + // (s18 is first CSR) + || MRI.isPhysRegUsed(VE::SX11) // %sp is used + || hasFP(MF)); // need %fp +} + +void VEFrameLowering::determineCalleeSaves(MachineFunction &MF, + BitVector &SavedRegs, + RegScavenger *RS) const { + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); + + if (!DisableLeafProc && isLeafProc(MF)) { + VEMachineFunctionInfo *MFI = MF.getInfo(); + MFI->setLeafProc(true); + } +} diff --git a/llvm/lib/Target/VE/VEISelDAGToDAG.cpp b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp @@ -0,0 +1,275 @@ +//===-- VEISelDAGToDAG.cpp - A dag to dag inst selector for VE ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines an instruction selector for the VE target. +// +//===----------------------------------------------------------------------===// + +#include "VETargetMachine.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Instruction Selector Implementation +//===----------------------------------------------------------------------===// + +//===--------------------------------------------------------------------===// +/// VEDAGToDAGISel - VE specific code to select VE machine +/// instructions for SelectionDAG operations. +/// +namespace { +class VEDAGToDAGISel : public SelectionDAGISel { + /// Subtarget - Keep a pointer to the VE Subtarget around so that we can + /// make the right decision when generating code for different targets. + const VESubtarget *Subtarget; + +public: + explicit VEDAGToDAGISel(VETargetMachine &tm) : SelectionDAGISel(tm) {} + + bool runOnMachineFunction(MachineFunction &MF) override { + Subtarget = &MF.getSubtarget(); + return SelectionDAGISel::runOnMachineFunction(MF); + } + + void Select(SDNode *N) override; + + // Complex Pattern Selectors. + bool SelectADDRrr(SDValue N, SDValue &R1, SDValue &R2); + bool SelectADDRri(SDValue N, SDValue &Base, SDValue &Offset); + + /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for + /// inline asm expressions. + bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, + std::vector &OutOps) override; + + StringRef getPassName() const override { + return "VE DAG->DAG Pattern Instruction Selection"; + } + + // Include the pieces autogenerated from the target description. +#include "VEGenDAGISel.inc" + +private: + SDNode *getGlobalBaseReg(); + bool tryInlineAsm(SDNode *N); +}; +} // end anonymous namespace + +SDNode *VEDAGToDAGISel::getGlobalBaseReg() { + unsigned GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF); + return CurDAG + ->getRegister(GlobalBaseReg, TLI->getPointerTy(CurDAG->getDataLayout())) + .getNode(); +} + +bool VEDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base, + SDValue &Offset) { + if (FrameIndexSDNode *FIN = dyn_cast(Addr)) { + Base = CurDAG->getTargetFrameIndex( + FIN->getIndex(), TLI->getPointerTy(CurDAG->getDataLayout())); + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32); + return true; + } + if (Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress || + Addr.getOpcode() == ISD::TargetGlobalTLSAddress) + return false; // direct calls. + + if (Addr.getOpcode() == ISD::ADD) { + if (ConstantSDNode *CN = dyn_cast(Addr.getOperand(1))) { + if (isInt<13>(CN->getSExtValue())) { + if (FrameIndexSDNode *FIN = + dyn_cast(Addr.getOperand(0))) { + // Constant offset from frame ref. 
+ Base = CurDAG->getTargetFrameIndex( + FIN->getIndex(), TLI->getPointerTy(CurDAG->getDataLayout())); + } else { + Base = Addr.getOperand(0); + } + Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr), + MVT::i32); + return true; + } + } + } + Base = Addr; + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32); + return true; +} + +bool VEDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) { + if (Addr.getOpcode() == ISD::FrameIndex) + return false; + if (Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress || + Addr.getOpcode() == ISD::TargetGlobalTLSAddress) + return false; // direct calls. + + if (Addr.getOpcode() == ISD::ADD) { + if (ConstantSDNode *CN = dyn_cast(Addr.getOperand(1))) + if (isInt<13>(CN->getSExtValue())) + return false; // Let the reg+imm pattern catch this! + if (Addr.getOperand(0).getOpcode() == VEISD::Lo || + Addr.getOperand(1).getOpcode() == VEISD::Lo) + return false; // Let the reg+imm pattern catch this! + R1 = Addr.getOperand(0); + R2 = Addr.getOperand(1); + return true; + } + + return false; // Let the reg+imm pattern catch this! +} + +// Re-assemble i64 arguments split up in SelectionDAGBuilder's +// visitInlineAsm / GetRegistersForValue functions. +// +// Note: This function was copied from, and is essentially identical +// to ARMISelDAGToDAG::SelectInlineAsm. It is very unfortunate that +// such hacking-up is necessary; a rethink of how inline asm operands +// are handled may be in order to make doing this more sane. +// +// TODO: fix inline asm support so I can simply tell it that 'i64' +// inputs to asm need to be allocated to the IntPair register type, +// and have that work. Then, delete this function. +bool VEDAGToDAGISel::tryInlineAsm(SDNode *N) { + std::vector AsmNodeOperands; + unsigned Flag, Kind; + bool Changed = false; + unsigned NumOps = N->getNumOperands(); + + // Normally, i64 data is bounded to two arbitrary GPRs for "%r" + // constraint. However, some instructions (e.g. ldd/std) require + // (even/even+1) GPRs. + + // So, here, we check for this case, and mutate the inlineasm to use + // a single IntPair register instead, which guarantees such even/odd + // placement. + + SDLoc dl(N); + SDValue Glue = + N->getGluedNode() ? N->getOperand(NumOps - 1) : SDValue(nullptr, 0); + + SmallVector OpChanged; + // Glue node will be appended late. + for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; + ++i) { + SDValue op = N->getOperand(i); + AsmNodeOperands.push_back(op); + + if (i < InlineAsm::Op_FirstOperand) + continue; + + if (ConstantSDNode *C = dyn_cast(N->getOperand(i))) { + Flag = C->getZExtValue(); + Kind = InlineAsm::getKind(Flag); + } else + continue; + + // Immediate operands to inline asm in the SelectionDAG are modeled with + // two operands. The first is a constant of value InlineAsm::Kind_Imm, and + // the second is a constant with the value of the immediate. If we get here + // and we have a Kind_Imm, skip the next operand, and continue. + if (Kind == InlineAsm::Kind_Imm) { + SDValue op = N->getOperand(++i); + AsmNodeOperands.push_back(op); + continue; + } + + unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag); + if (NumRegs) + OpChanged.push_back(false); + + unsigned DefIdx = 0; + bool IsTiedToChangedOp = false; + // If it's a use that is tied with a previous def, it has no + // reg class constraint. 
+ if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx)) + IsTiedToChangedOp = OpChanged[DefIdx]; + + if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef && + Kind != InlineAsm::Kind_RegDefEarlyClobber) + continue; + + unsigned RC; + bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC); + if ((!IsTiedToChangedOp && (!HasRC || RC != VE::I64RegClassID)) || + NumRegs != 2) + continue; + + // No IntPairRegister on VE + continue; + } + + if (Glue.getNode()) + AsmNodeOperands.push_back(Glue); + if (!Changed) + return false; + + SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N), + CurDAG->getVTList(MVT::Other, MVT::Glue), + AsmNodeOperands); + New->setNodeId(-1); + ReplaceNode(N, New.getNode()); + return true; +} + +void VEDAGToDAGISel::Select(SDNode *N) { + SDLoc dl(N); + if (N->isMachineOpcode()) { + N->setNodeId(-1); + return; // Already selected. + } + + switch (N->getOpcode()) { + default: + break; + case ISD::INLINEASM: { + if (tryInlineAsm(N)) + return; + break; + } + case VEISD::GLOBAL_BASE_REG: + ReplaceNode(N, getGlobalBaseReg()); + return; + } + + SelectCode(N); +} + +/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for +/// inline asm expressions. +bool VEDAGToDAGISel::SelectInlineAsmMemoryOperand( + const SDValue &Op, unsigned ConstraintID, std::vector &OutOps) { + SDValue Op0, Op1; + switch (ConstraintID) { + default: + return true; + case InlineAsm::Constraint_i: + case InlineAsm::Constraint_o: + case InlineAsm::Constraint_m: // memory + if (!SelectADDRrr(Op, Op0, Op1)) + SelectADDRri(Op, Op0, Op1); + break; + } + + OutOps.push_back(Op0); + OutOps.push_back(Op1); + return false; +} + +/// createVEISelDag - This pass converts a legalized DAG into a +/// VE-specific DAG, ready for instruction scheduling. +/// +FunctionPass *llvm::createVEISelDag(VETargetMachine &TM) { + return new VEDAGToDAGISel(TM); +} diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEISelLowering.h @@ -0,0 +1,264 @@ +//===-- VEISelLowering.h - VE DAG Lowering Interface ------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that VE uses to lower LLVM code into a +// selection DAG. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VEISELLOWERING_H +#define LLVM_LIB_TARGET_VE_VEISELLOWERING_H + +#include "VE.h" +#include "llvm/CodeGen/TargetLowering.h" + +namespace llvm { +class VESubtarget; + +namespace VEISD { +enum NodeType : unsigned { + FIRST_NUMBER = ISD::BUILTIN_OP_END, + CMPICC, // Compare two GPR operands, set icc+xcc. + CMPFCC, // Compare two FP operands, set fcc. + BRICC, // Branch to dest on icc condition + BRXCC, // Branch to dest on xcc condition (64-bit only). + BRFCC, // Branch to dest on fcc condition + SELECT, + SELECT_ICC, // Select between two values using the current ICC flags. + SELECT_XCC, // Select between two values using the current XCC flags. + SELECT_FCC, // Select between two values using the current FCC flags. + + EH_SJLJ_SETJMP, // SjLj exception handling setjmp. + EH_SJLJ_LONGJMP, // SjLj exception handling longjmp. 
+ EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch. + + Hi, + Lo, // Hi/Lo operations, typically on a global address. + + FTOI, // FP to Int within a FP register. + ITOF, // Int to FP within a FP register. + FTOX, // FP to Int64 within a FP register. + XTOF, // Int64 to FP within a FP register. + + MAX, + MIN, + FMAX, + FMIN, + + GETFUNPLT, // load function address through %plt insturction + GETSTACKTOP, // retrieve address of stack top (first address of + // locals and temporaries) + GETTLSADDR, // load address for TLS access + + MEMBARRIER, // Compiler barrier only; generate a no-op. + + CALL, // A call instruction. + RET_FLAG, // Return with a flag operand. + GLOBAL_BASE_REG, // Global base reg for PIC. + FLUSHW, // FLUSH register windows to stack. + + VEC_BROADCAST, // a scalar value is broadcast across all vector lanes (Operand + // 0: the broadcast register) + VEC_SEQ, // sequence vector match (Operand 0: the constant stride) + + VEC_VMV, + + /// Scatter and gather instructions. + VEC_GATHER, + VEC_SCATTER, + + VEC_LVL, + + /// A wrapper node for TargetConstantPool, TargetJumpTable, + /// TargetExternalSymbol, TargetGlobalAddress, TargetGlobalTLSAddress, + /// MCSymbol and TargetBlockAddress. + Wrapper, +}; +} + +class VETargetLowering : public TargetLowering { + const VESubtarget *Subtarget; + +public: + VETargetLowering(const TargetMachine &TM, const VESubtarget &STI); + SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; + + /// computeKnownBitsForTargetNode - Determine which of the bits specified + /// in Mask are known to be either zero or one and return them in the + /// KnownZero/KnownOne bitsets. + void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, + const APInt &DemandedElts, + const SelectionDAG &DAG, + unsigned Depth = 0) const override; + + MachineBasicBlock * + EmitInstrWithCustomInserter(MachineInstr &MI, + MachineBasicBlock *MBB) const override; + + const char *getTargetNodeName(unsigned Opcode) const override; + + ConstraintType getConstraintType(StringRef Constraint) const override; + ConstraintWeight + getSingleConstraintMatchWeight(AsmOperandInfo &info, + const char *constraint) const override; + void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, + std::vector &Ops, + SelectionDAG &DAG) const override; + + unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override { + if (ConstraintCode == "o") + return InlineAsm::Constraint_o; + return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); + } + + std::pair + getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, MVT VT) const override; + + bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override; + MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override { + return MVT::i32; + } + + Register getRegisterByName(const char *RegName, EVT VT, + const MachineFunction &MF) const override; + + /// Override to support customized stack guard loading. 
+ bool useLoadStackGuardNode() const override; + void insertSSPDeclarations(Module &M) const override; + + /// getSetCCResultType - Return the ISD::SETCC ValueType + EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, + EVT VT) const override; + + SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl &Ins, + const SDLoc &dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const override; + SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv, + bool isVarArg, + const SmallVectorImpl &Ins, + const SDLoc &dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const; + + SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, + SmallVectorImpl &InVals) const override; + + bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, + bool isVarArg, + const SmallVectorImpl &ArgsFlags, + LLVMContext &Context) const override; + SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, const SDLoc &dl, + SelectionDAG &DAG) const override; + + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerToTLSGeneralDynamicModel(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerToTLSLocalExecModel(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBitcast(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerMGATHER_MSCATTER(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const; + // Should we expand the build vector with shuffles? + bool + shouldExpandBuildVectorWithShuffles(EVT VT, + unsigned DefinedValues) const override; + + SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const; + + unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const; + SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const; + SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, + SelectionDAG &DAG) const; + SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const; + + bool isFPImmLegal(const APFloat &Imm, EVT VT, + bool ForCodeSize) const override; + + bool ShouldShrinkFPConstant(EVT VT) const override { + // Do not shrink FP constpool if VT == MVT::f128. + // (ldd, call _Q_fdtoq) is more expensive than two ldds. + return VT != MVT::f128; + } + + /// Returns true if the target allows unaligned memory accesses of the + /// specified type. 
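+  /// On VE, scalar accesses are accepted (and fast) at any alignment, while
+  /// misaligned vector accesses are rejected; see the implementation in
+  /// VEISelLowering.cpp.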
+ bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align, + MachineMemOperand::Flags Flags, + bool *Fast) const override; + + bool mergeStoresAfterLegalization(EVT) const override { return true; } + + bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, + const SelectionDAG &DAG) const override; + + unsigned getJumpTableEncoding() const override; + + const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, + const MachineBasicBlock *MBB, + unsigned uid, + MCContext &Ctx) const override; + + bool shouldInsertFencesForAtomic(const Instruction *I) const override { + // VE uses Release consistency, so need fence for each atomics. + return true; + } + Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, + AtomicOrdering Ord) const override; + Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, + AtomicOrdering Ord) const override; + + AtomicExpansionKind + shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; + + void ReplaceNodeResults(SDNode *N, SmallVectorImpl &Results, + SelectionDAG &DAG) const override; + + MachineBasicBlock *expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB, + unsigned BROpcode) const; + MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI, + MachineBasicBlock *MBB) const; + MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI, + MachineBasicBlock *MBB) const; + MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr &MI, + MachineBasicBlock *BB) const; + void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB, + MachineBasicBlock *DispatchBB, int FI) const; + void finalizeLowering(MachineFunction &MF) const override; + + // VE supports only vector FMA + bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, + EVT VT) const override { + return VT.isVector(); + } +}; +} // namespace llvm + +#endif // VE_ISELLOWERING_H diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -0,0 +1,3270 @@ +//===-- VEISelLowering.cpp - VE DAG Lowering Implementation ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the interfaces that VE uses to lower LLVM code into a +// selection DAG. 
+// +//===----------------------------------------------------------------------===// + +#include "VEISelLowering.h" +#include "MCTargetDesc/VEMCExpr.h" +#include "VEInstrBuilder.h" +#include "VEMachineFunctionInfo.h" +#include "VERegisterInfo.h" +#include "VETargetMachine.h" +// #include "VETargetObjectFile.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/KnownBits.h" +using namespace llvm; + +#define DEBUG_TYPE "ve-lower" + +//===----------------------------------------------------------------------===// +// Calling Convention Implementation +//===----------------------------------------------------------------------===// + +static bool allocateFloat(unsigned ValNo, MVT ValVT, MVT LocVT, + CCValAssign::LocInfo LocInfo, + ISD::ArgFlagsTy ArgFlags, CCState &State) { + switch (LocVT.SimpleTy) { + case MVT::f32: { + // Allocate stack like below + // 0 4 + // +------+------+ + // | empty| float| + // +------+------+ + // Use align=8 for dummy area to align the beginning of these 2 area. + State.AllocateStack(4, 8); // for empty area + // Use align=4 for value to place it at just after the dummy area. + unsigned Offset = State.AllocateStack(4, 4); // for float value area + State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); + return true; + } + default: + return false; + } +} + +#include "VEGenCallingConv.inc" + +bool VETargetLowering::CanLowerReturn( + CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, + const SmallVectorImpl &Outs, LLVMContext &Context) const { + CCAssignFn *RetCC = RetCC_VE; + SmallVector RVLocs; + CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); + return CCInfo.CheckReturn(Outs, RetCC); +} + +SDValue VETargetLowering::LowerBitcast(SDValue Op, SelectionDAG &DAG) const { + if (Op.getSimpleValueType() == MVT::v256i64 && + Op.getOperand(0).getSimpleValueType() == MVT::v256f64) { + LLVM_DEBUG(dbgs() << "Lowering bitcast of similar types.\n"); + return Op.getOperand(0); + } else { + return Op; + } +} + +SDValue VETargetLowering::LowerMGATHER_MSCATTER(SDValue Op, + SelectionDAG &DAG) const { + LLVM_DEBUG(dbgs() << "Lowering gather or scatter\n"); + SDLoc dl(Op); + // dbgs() << "\nNext Instr:\n"; + // Op.dumpr(&DAG); + + MaskedGatherScatterSDNode *N = cast(Op.getNode()); + + SDValue Index = N->getIndex(); + SDValue BasePtr = N->getBasePtr(); + SDValue Mask = N->getMask(); + SDValue Chain = N->getChain(); + + SDValue PassThru; + SDValue Source; + + if (Op.getOpcode() == ISD::MGATHER) { + MaskedGatherSDNode *N = cast(Op.getNode()); + PassThru = N->getPassThru(); + } else if (Op.getOpcode() == ISD::MSCATTER) { + MaskedScatterSDNode *N = cast(Op.getNode()); + Source = N->getValue(); + } else { + return SDValue(); + } + + MVT IndexVT = Index.getSimpleValueType(); + // MVT MaskVT = Mask.getSimpleValueType(); + // MVT BasePtrVT = BasePtr.getSimpleValueType(); + + // vindex = vindex + baseptr; + SDValue BaseBroadcast = + DAG.getNode(VEISD::VEC_BROADCAST, dl, 
IndexVT, BasePtr); + SDValue ScaleBroadcast = + DAG.getNode(VEISD::VEC_BROADCAST, dl, IndexVT, N->getScale()); + + SDValue index_addr = + DAG.getNode(ISD::MUL, dl, IndexVT, {Index, ScaleBroadcast}); + + SDValue addresses = + DAG.getNode(ISD::ADD, dl, IndexVT, {BaseBroadcast, index_addr}); + + // TODO: vmx = svm (mask); + // Mask.dumpr(&DAG); + if (Mask.getOpcode() != ISD::BUILD_VECTOR || Mask.getNumOperands() != 256) { + LLVM_DEBUG(dbgs() << "Cannot handle gathers with complex masks.\n"); + return SDValue(); + } + for (unsigned i = 0; i < 256; i++) { + const SDValue Operand = Mask.getOperand(i); + if (Operand.getOpcode() != ISD::Constant) { + LLVM_DEBUG( + dbgs() << "Cannot handle gather masks with complex elements.\n"); + return SDValue(); + } + if (Mask.getConstantOperandVal(i) != 1) { + LLVM_DEBUG(dbgs() << "Cannot handle gather masks with elements != 1.\n"); + return SDValue(); + } + } + + if (Op.getOpcode() == ISD::MGATHER) { + // vt = vgt (vindex, vmx, cs=0, sx=0, sy=0, sw=0); + SDValue load = DAG.getNode(VEISD::VEC_GATHER, dl, Op.getNode()->getVTList(), + {Chain, addresses}); + // load.dumpr(&DAG); + + // TODO: merge (vt, default, vmx); + // PassThru.dumpr(&DAG); + // We can safely ignore PassThru right now, the mask is guaranteed to be + // constant 1s. + + return load; + } else { + SDValue store = + DAG.getNode(VEISD::VEC_SCATTER, dl, Op.getNode()->getVTList(), + {Chain, Source, addresses}); + // store.dumpr(&DAG); + return store; + } +} + +SDValue VETargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const { + LLVM_DEBUG(dbgs() << "Lowering MLOAD\n"); + LLVM_DEBUG(Op.dumpr(&DAG)); + SDLoc dl(Op); + + MaskedLoadSDNode *N = cast(Op.getNode()); + + SDValue BasePtr = N->getBasePtr(); + SDValue Mask = N->getMask(); + SDValue Chain = N->getChain(); + SDValue PassThru = N->getPassThru(); + + MachinePointerInfo info = N->getPointerInfo(); + + if (Mask.getOpcode() != ISD::BUILD_VECTOR || Mask.getNumOperands() != 256) { + LLVM_DEBUG(dbgs() << "Cannot handle gathers with complex masks.\n"); + return SDValue(); + } + + int firstzero = 256; + + for (unsigned i = 0; i < 256; i++) { + const SDValue Operand = Mask.getOperand(i); + if (Operand.getOpcode() != ISD::Constant) { + LLVM_DEBUG(dbgs() << "Cannot handle load masks with complex elements.\n"); + return SDValue(); + } + if (Mask.getConstantOperandVal(i) != 1) { + if (firstzero == 256) + firstzero = i; + if (!PassThru.isUndef() && !PassThru.getOperand(i).isUndef()) { + LLVM_DEBUG(dbgs() << "Cannot handle passthru.\n"); + return SDValue(); + } + } else { + if (firstzero != 256) { + LLVM_DEBUG(dbgs() << "Cannot handle mixed load masks.\n"); + return SDValue(); + } + } + } + + EVT i32 = EVT::getIntegerVT(*DAG.getContext(), 32); + + // FIXME: LVL instruction has output VL now, need to update VEC_LVL too. + Chain = DAG.getNode(VEISD::VEC_LVL, dl, MVT::Other, + {Chain, DAG.getConstant(firstzero, dl, i32)}); + + SDValue load = DAG.getLoad(Op.getSimpleValueType(), dl, Chain, BasePtr, info); + + // FIXME: LVL instruction has output VL now, need to update VEC_LVL too. 
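+  // In effect, a mask of the form <1, ..., 1, 0, ..., 0> is emulated by
+  // shrinking the vector length to firstzero for the load and restoring it
+  // to the full 256 elements right below, roughly:
+  //   lvl firstzero;  vld ...;  lvl 256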
+ Chain = DAG.getNode(VEISD::VEC_LVL, dl, MVT::Other, + {load.getValue(1), DAG.getConstant(256, dl, i32)}); + + SDValue merge = DAG.getMergeValues({load, Chain}, dl); + LLVM_DEBUG(dbgs() << "Becomes\n"); + LLVM_DEBUG(merge.dumpr(&DAG)); + return merge; +} + +static bool isBroadCastOrS2V(BuildVectorSDNode *BVN, bool &AllUndef, bool &S2V, + unsigned &FirstDef) { + // Check UNDEF or FirstDef + AllUndef = true; + S2V = false; + FirstDef = 0; + for (unsigned i = 0; i < BVN->getNumOperands(); ++i) { + if (!BVN->getOperand(i).isUndef()) { + AllUndef = false; + FirstDef = i; + break; + } + } + if (AllUndef) + return true; + // Check scalar_to_vector (single def at first, and the rests are undef) + if (FirstDef == 0) { + S2V = true; + for (unsigned i = FirstDef + 1; i < BVN->getNumOperands(); ++i) { + if (!BVN->getOperand(i).isUndef()) { + S2V = false; + break; + } + } + if (S2V) + return true; + } + // Check boradcast + for (unsigned i = FirstDef + 1; i < BVN->getNumOperands(); ++i) { + if (BVN->getOperand(FirstDef) != BVN->getOperand(i) && + !BVN->getOperand(i).isUndef()) { + return false; + } + } + return true; +} + +SDValue VETargetLowering::LowerBUILD_VECTOR(SDValue Op, + SelectionDAG &DAG) const { + LLVM_DEBUG(dbgs() << "Lowering BUILD_VECTOR\n"); + BuildVectorSDNode *BVN = cast(Op.getNode()); + + SDLoc DL(Op); + + // match VEC_BROADCAST + bool AllUndef; + bool S2V; + unsigned FirstDef; + if (isBroadCastOrS2V(BVN, AllUndef, S2V, FirstDef)) { + if (AllUndef) { + LLVM_DEBUG(dbgs() << "AllUndef: VEC_BROADCAST "); + LLVM_DEBUG(BVN->getOperand(0)->dump()); + return DAG.getNode(VEISD::VEC_BROADCAST, DL, Op.getSimpleValueType(), + BVN->getOperand(0)); + } else if (S2V) { + LLVM_DEBUG(dbgs() << "isS2V: scalar_to_vector "); + LLVM_DEBUG(BVN->getOperand(FirstDef)->dump()); + return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, Op.getSimpleValueType(), + BVN->getOperand(FirstDef)); + } else { + LLVM_DEBUG(dbgs() << "isBroadCast: VEC_BROADCAST "); + LLVM_DEBUG(BVN->getOperand(FirstDef)->dump()); + return DAG.getNode(VEISD::VEC_BROADCAST, DL, Op.getSimpleValueType(), + BVN->getOperand(FirstDef)); + } + } + +#if 0 + if (BVN->isConstant()) { + // All values are either a constant value or undef, so optimize it... + } +#endif + + // match VEC_SEQ(stride) patterns + // identify a constant stride vector + bool hasConstantStride = true; + + // whether the constant is a repetition of ascending indices, eg <0, 1, 2, 3, + // 0, 1, 2, 3, ..> + bool hasBlockStride = false; + + // whether the constant is an ascending sequence of repeated indices, eg <0, + // 0, 1, 1, 2, 2, 3, 3 ..> + bool hasBlockStride2 = false; + + bool firstStride = true; + int64_t blockLength = 0; + int64_t stride = 0; + int64_t lastElemValue = 0; + MVT elemTy; + + for (unsigned i = 0; i < BVN->getNumOperands(); ++i) { + if (hasBlockStride) { + if (i % blockLength == 0) + stride = 1; + else + stride = 0; + } + + if (BVN->getOperand(i).isUndef()) { + if (hasBlockStride2 && i % blockLength == 0) + lastElemValue = 0; + else + lastElemValue += stride; + continue; + } + + // is this an immediate constant value? + auto *constNumElem = dyn_cast(BVN->getOperand(i)); + if (!constNumElem) { + hasConstantStride = false; + hasBlockStride = false; + hasBlockStride2 = false; + break; + } + + // read value + int64_t elemValue = constNumElem->getSExtValue(); + elemTy = constNumElem->getSimpleValueType(0); + + if (i == FirstDef) { + // FIXME: Currently, this code requies that first value of vseq + // is zero. 
This is possible to enhance like thses instructions: + // VSEQ $v0 + // VBRD $v1, 2 + // VADD $v0, $v0, $v1 + if (elemValue != 0) { + hasConstantStride = false; + hasBlockStride = false; + hasBlockStride2 = false; + break; + } + } else if (i > FirstDef && firstStride) { + // first stride + stride = (elemValue - lastElemValue) / (i - FirstDef); + firstStride = false; + } else if (i > FirstDef) { + // later stride + if (hasBlockStride2 && elemValue == 0 && i % blockLength == 0) { + lastElemValue = 0; + continue; + } + int64_t thisStride = elemValue - lastElemValue; + if (thisStride != stride) { + hasConstantStride = false; + if (!hasBlockStride && thisStride == 1 && stride == 0 && + lastElemValue == 0) { + hasBlockStride = true; + blockLength = i; + } else if (!hasBlockStride2 && elemValue == 0 && + lastElemValue + 1 == i) { + hasBlockStride2 = true; + blockLength = i; + } else { + // not blockStride anymore. e.g. { 0, 1, 2, 3, 0, 0, 0, 0 } + hasBlockStride = false; + hasBlockStride2 = false; + break; + } + } + } + + // track last elem value + lastElemValue = elemValue; + } + + // detected a proper stride pattern + if (hasConstantStride) { + SDValue seq = DAG.getNode( + VEISD::VEC_SEQ, DL, Op.getSimpleValueType(), + DAG.getConstant(1, DL, elemTy)); // TODO draw strideTy from elements + if (stride == 1) { + LLVM_DEBUG(dbgs() << "ConstantStride: VEC_SEQ\n"); + LLVM_DEBUG(seq.dump()); + return seq; + } + + SDValue const_stride = + DAG.getNode(VEISD::VEC_BROADCAST, DL, Op.getSimpleValueType(), + DAG.getConstant(stride, DL, elemTy)); + SDValue ret = + DAG.getNode(ISD::MUL, DL, Op.getSimpleValueType(), {seq, const_stride}); + LLVM_DEBUG(dbgs() << "ConstantStride: VEC_SEQ * VEC_BROADCAST\n"); + LLVM_DEBUG(const_stride.dump()); + LLVM_DEBUG(ret.dump()); + return ret; + } + + // codegen for <0, 0, .., 0, 0, 1, 1, .., 1, 1, .....> constant patterns + // constant == VSEQ >> log2(blockLength) + if (hasBlockStride) { + int64_t blockLengthLog = log2(blockLength); + + if (pow(2, blockLengthLog) == blockLength) { + SDValue sequence = + DAG.getNode(VEISD::VEC_SEQ, DL, Op.getSimpleValueType(), + DAG.getConstant(1, DL, elemTy)); + SDValue shiftbroadcast = + DAG.getNode(VEISD::VEC_BROADCAST, DL, Op.getSimpleValueType(), + DAG.getConstant(blockLengthLog, DL, elemTy)); + + SDValue shift = DAG.getNode(ISD::SRL, DL, Op.getSimpleValueType(), + {sequence, shiftbroadcast}); + LLVM_DEBUG(dbgs() << "BlockStride: VEC_SEQ >> VEC_BROADCAST\n"); + LLVM_DEBUG(sequence.dump()); + LLVM_DEBUG(shiftbroadcast.dump()); + LLVM_DEBUG(shift.dump()); + return shift; + } + } + + // codegen for <0, 1, .., 15, 0, 1, .., ..... > constant patterns + // constant == VSEQ % blockLength + if (hasBlockStride2) { + int64_t blockLengthLog = log2(blockLength); + + if (pow(2, blockLengthLog) == blockLength) { + SDValue sequence = + DAG.getNode(VEISD::VEC_SEQ, DL, Op.getSimpleValueType(), + DAG.getConstant(1, DL, elemTy)); + SDValue modulobroadcast = + DAG.getNode(VEISD::VEC_BROADCAST, DL, Op.getSimpleValueType(), + DAG.getConstant(blockLength - 1, DL, elemTy)); + + SDValue modulo = DAG.getNode(ISD::AND, DL, Op.getSimpleValueType(), + {sequence, modulobroadcast}); + + LLVM_DEBUG(dbgs() << "BlockStride2: VEC_SEQ & VEC_BROADCAST\n"); + LLVM_DEBUG(sequence.dump()); + LLVM_DEBUG(modulobroadcast.dump()); + LLVM_DEBUG(modulo.dump()); + return modulo; + } + } + + // Otherwise, generate element-wise insertions. 
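+  // For example, a v4i64 constant such as <5, 9, undef, 7> becomes
+  //   t0 = IMPLICIT_DEF
+  //   t1 = insert_vector_elt t0, 5, 0
+  //   t2 = insert_vector_elt t1, 9, 1
+  //   ...
+  // i.e. one INSERT_VECTOR_ELT node per operand of the BUILD_VECTOR.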
+ SDValue newVector = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, + Op.getSimpleValueType()), + 0); + + for (unsigned i = 0; i < BVN->getNumOperands(); ++i) { + newVector = DAG.getNode( + ISD::INSERT_VECTOR_ELT, DL, Op.getSimpleValueType(), newVector, + BVN->getOperand(i), + DAG.getConstant(i, DL, EVT::getIntegerVT(*DAG.getContext(), 64))); + } + return newVector; +} + +SDValue +VETargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, + bool IsVarArg, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SDLoc &DL, SelectionDAG &DAG) const { + // CCValAssign - represent the assignment of the return value to locations. + SmallVector RVLocs; + + // CCState - Info about the registers and stack slot. + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); + + // Analyze return values. + CCInfo.AnalyzeReturn(Outs, RetCC_VE); + + SDValue Flag; + SmallVector RetOps(1, Chain); + + // Copy the result values into the output registers. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + SDValue OutVal = OutVals[i]; + + // Integer return values must be sign or zero extended by the callee. + switch (VA.getLocInfo()) { + case CCValAssign::Full: + break; + case CCValAssign::SExt: + OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal); + break; + case CCValAssign::ZExt: + OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal); + break; + case CCValAssign::AExt: + OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal); + break; + default: + llvm_unreachable("Unknown loc info!"); + } + + // The custom bit on an i32 return value indicates that it should be passed + // in the high bits of the register. + if (VA.getValVT() == MVT::i32 && VA.needsCustom()) { + OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal, + DAG.getConstant(32, DL, MVT::i32)); + + // The next value may go in the low bits of the same register. + // Handle both at once. + if (i + 1 < RVLocs.size() && + RVLocs[i + 1].getLocReg() == VA.getLocReg()) { + SDValue NV = + DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i + 1]); + OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV); + // Skip the next value, it's already done. + ++i; + } + } + + Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag); + + // Guarantee that all emitted copies are stuck together with flags. + Flag = Chain.getValue(1); + RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); + } + + RetOps[0] = Chain; // Update chain. + + // Add the flag if we have it. + if (Flag.getNode()) + RetOps.push_back(Flag); + + return DAG.getNode(VEISD::RET_FLAG, DL, MVT::Other, RetOps); +} + +SDValue VETargetLowering::LowerFormalArguments( + SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl &Ins, const SDLoc &DL, + SelectionDAG &DAG, SmallVectorImpl &InVals) const { + MachineFunction &MF = DAG.getMachineFunction(); + + // Get the base offset of the incoming arguments stack space. + unsigned ArgsBaseOffset = 176; + // Get the size of the preserved arguments area + unsigned ArgsPreserved = 64; + + // Analyze arguments according to CC_VE. + SmallVector ArgLocs; + CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); + // Allocate the preserved area first. 
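+  // (The first 64 bytes of the argument area at %fp+176 shadow the eight
+  //  register arguments %s0-%s7, so stack-passed arguments start after them.)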
+ CCInfo.AllocateStack(ArgsPreserved, 8); + // We already allocated the preserved area, so the stack offset computed + // by CC_VE would be correct now. + CCInfo.AnalyzeFormalArguments(Ins, CC_VE); + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + if (VA.isRegLoc()) { + // This argument is passed in a register. + // All integer register arguments are promoted by the caller to i64. + + // Create a virtual register for the promoted live-in value. + unsigned VReg = + MF.addLiveIn(VA.getLocReg(), getRegClassFor(VA.getLocVT())); + SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT()); + + // Get the high bits for i32 struct elements. + if (VA.getValVT() == MVT::i32 && VA.needsCustom()) + Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg, + DAG.getConstant(32, DL, MVT::i32)); + + // The caller promoted the argument, so insert an Assert?ext SDNode so we + // won't promote the value again in this function. + switch (VA.getLocInfo()) { + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg, + DAG.getValueType(VA.getValVT())); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg, + DAG.getValueType(VA.getValVT())); + break; + default: + break; + } + + // Truncate the register down to the argument type. + if (VA.isExtInLoc()) + Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg); + + InVals.push_back(Arg); + continue; + } + + // The registers are exhausted. This argument was passed on the stack. + assert(VA.isMemLoc()); + // The CC_VE_Full/Half functions compute stack offsets relative to the + // beginning of the arguments area at %fp+176. + unsigned Offset = VA.getLocMemOffset() + ArgsBaseOffset; + unsigned ValSize = VA.getValVT().getSizeInBits() / 8; + // Adjust offset for extended arguments, SPARC is big-endian. + // The caller will have written the full slot with extended bytes, but we + // prefer our own extending loads. + if (VA.isExtInLoc()) + Offset += 8 - ValSize; + int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true); + InVals.push_back( + DAG.getLoad(VA.getValVT(), DL, Chain, + DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())), + MachinePointerInfo::getFixedStack(MF, FI))); + } + + if (!IsVarArg) + return Chain; + + // This function takes variable arguments, some of which may have been passed + // in registers %s0-%s8. + // + // The va_start intrinsic needs to know the offset to the first variable + // argument. + // TODO: need to calculate offset correctly once we support f128. + unsigned ArgOffset = ArgLocs.size() * 8; + VEMachineFunctionInfo *FuncInfo = MF.getInfo(); + // Skip the 176 bytes of register save area. + FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgsBaseOffset); + + return Chain; +} + +// FIXME? Maybe this could be a TableGen attribute on some registers and +// this table could be generated automatically from RegInfo. 
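+// This hook backs named-register access, e.g.
+//   register unsigned long sp asm("sp");   // resolves "sp" to VE::SX11 below
+// and the llvm.read_register / llvm.write_register intrinsics.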
+Register VETargetLowering::getRegisterByName(const char *RegName, EVT VT,
+                                             const MachineFunction &MF) const {
+  unsigned Reg = StringSwitch<unsigned>(RegName)
+                     .Case("sp", VE::SX11)    // Stack pointer
+                     .Case("fp", VE::SX9)     // Frame pointer
+                     .Case("sl", VE::SX8)     // Stack limit
+                     .Case("lr", VE::SX10)    // Link register
+                     .Case("tp", VE::SX14)    // Thread pointer
+                     .Case("outer", VE::SX12) // Outer register
+                     .Case("info", VE::SX17)  // Info area register
+                     .Case("got", VE::SX15)   // Global offset table register
+                     .Case("plt", VE::SX16)   // Procedure linkage table register
+                     .Case("usrcc", VE::UCC)  // User clock counter
+                     .Default(0);
+
+  if (Reg)
+    return Reg;
+
+  report_fatal_error("Invalid register name global variable");
+}
+
+// This function returns true if CalleeName is an ABI function that returns
+// a long double (fp128).
+static bool isFP128ABICall(const char *CalleeName) {
+  static const char *const ABICalls[] = {
+      "_Q_add", "_Q_sub",   "_Q_mul",    "_Q_div",  "_Q_sqrt",
+      "_Q_neg", "_Q_itoq",  "_Q_stoq",   "_Q_dtoq", "_Q_utoq",
+      "_Q_lltoq", "_Q_ulltoq", nullptr};
+  for (const char *const *I = ABICalls; *I != nullptr; ++I)
+    if (strcmp(CalleeName, *I) == 0)
+      return true;
+  return false;
+}
+
+unsigned VETargetLowering::getSRetArgSize(SelectionDAG &DAG,
+                                          SDValue Callee) const {
+  const Function *CalleeFn = nullptr;
+  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+    CalleeFn = dyn_cast<Function>(G->getGlobal());
+  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+    const Function &F = DAG.getMachineFunction().getFunction();
+    const Module *M = F.getParent();
+    const char *CalleeName = E->getSymbol();
+    CalleeFn = M->getFunction(CalleeName);
+    if (!CalleeFn && isFP128ABICall(CalleeName))
+      return 16; // Return sizeof(fp128)
+  }
+
+  if (!CalleeFn)
+    return 0;
+
+  // It would be nice to check for the sret attribute on CalleeFn here,
+  // but since it is not part of the function type, any check will misfire.
+
+  PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
+  Type *ElementTy = Ty->getElementType();
+  return DAG.getDataLayout().getTypeAllocSize(ElementTy);
+}
+
+SDValue VETargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+                                    SmallVectorImpl<SDValue> &InVals) const {
+  SelectionDAG &DAG = CLI.DAG;
+  SDLoc DL = CLI.DL;
+  SDValue Chain = CLI.Chain;
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+  // VE target does not yet support tail call optimization.
+  CLI.IsTailCall = false;
+
+  // Get the base offset of the outgoing arguments stack space.
+  unsigned ArgsBaseOffset = 176;
+  // Get the size of the preserved arguments area.
+  unsigned ArgsPreserved = 8 * 8u;
+
+  // Analyze operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
+  // Allocate the preserved area first.
+  CCInfo.AllocateStack(ArgsPreserved, 8);
+  // We already allocated the preserved area, so the stack offset computed
+  // by CC_VE would be correct now.
+  CCInfo.AnalyzeCallOperands(CLI.Outs, CC_VE);
+
+  // VE requires using both registers and the stack for varargs and
+  // unprototyped functions.  FIXME: How to check the prototype here?
+  bool UseBoth = CLI.IsVarArg /* || CLI.NoProtoType */;
+
+  // Analyze the operands again if they must be stored BOTH ways.
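+  // For varargs, each argument gets both a register location from CC_VE and
+  // a stack slot from CC_VE2; the value is copied into the register and also
+  // stored to the parameter area where va_arg expects to find it.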
+ SmallVector ArgLocs2; + CCState CCInfo2(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), + ArgLocs2, *DAG.getContext()); + if (UseBoth) + CCInfo2.AnalyzeCallOperands(CLI.Outs, CC_VE2); + + // Get the size of the outgoing arguments stack space requirement. + unsigned ArgsSize = CCInfo.getNextStackOffset(); + + // Keep stack frames 16-byte aligned. + ArgsSize = alignTo(ArgsSize, 16); + + // Adjust the stack pointer to make room for the arguments. + // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls + // with more than 6 arguments. + Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL); + + // Collect the set of registers to pass to the function and their values. + // This will be emitted as a sequence of CopyToReg nodes glued to the call + // instruction. + SmallVector, 8> RegsToPass; + + // Collect chains from all the memory opeations that copy arguments to the + // stack. They must follow the stack pointer adjustment above and precede the + // call instruction itself. + SmallVector MemOpChains; + + // VE needs to get address of callee function in a register + // So, prepare to copy it to SX12 here. + + // If the callee is a GlobalAddress node (quite common, every direct call is) + // turn it into a TargetGlobalAddress node so that legalize doesn't hack it. + // Likewise ExternalSymbol -> TargetExternalSymbol. + SDValue Callee = CLI.Callee; + + bool IsPICCall = isPositionIndependent(); + + // PC-relative references to external symbols should go through $stub. + // If so, we need to prepare GlobalBaseReg first. + const TargetMachine &TM = DAG.getTarget(); + const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); + const GlobalValue *GV = nullptr; + if (auto *G = dyn_cast(Callee)) + GV = G->getGlobal(); + bool Local = TM.shouldAssumeDSOLocal(*Mod, GV); + bool UsePlt = !Local; + MachineFunction &MF = DAG.getMachineFunction(); + + // Turn GlobalAddress/ExternalSymbol node into a value node + // containing the address of them here. + if (GlobalAddressSDNode *G = dyn_cast(Callee)) { + if (IsPICCall) { + if (UsePlt) + Subtarget->getInstrInfo()->getGlobalBaseReg(&MF); + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, 0); + Callee = DAG.getNode(VEISD::GETFUNPLT, DL, PtrVT, Callee); + } else { + Callee = + makeHiLoPair(Callee, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, DAG); + } + } else if (ExternalSymbolSDNode *E = dyn_cast(Callee)) { + if (IsPICCall) { + if (UsePlt) + Subtarget->getInstrInfo()->getGlobalBaseReg(&MF); + Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0); + Callee = DAG.getNode(VEISD::GETFUNPLT, DL, PtrVT, Callee); + } else { + Callee = + makeHiLoPair(Callee, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, DAG); + } + } + + RegsToPass.push_back(std::make_pair(VE::SX12, Callee)); + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + CCValAssign &VA = ArgLocs[i]; + SDValue Arg = CLI.OutVals[i]; + + // Promote the value if needed. 
+ switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown location info!"); + case CCValAssign::Full: + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); + break; + } + + if (VA.isRegLoc()) { + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + if (!UseBoth) + continue; + VA = ArgLocs2[i]; + } + + assert(VA.isMemLoc()); + + // Create a store off the stack pointer for this argument. + SDValue StackPtr = DAG.getRegister(VE::SX11, PtrVT); + // The argument area starts at %fp+176 in the callee frame, + // %sp+176 in ours. + SDValue PtrOff = + DAG.getIntPtrConstant(VA.getLocMemOffset() + ArgsBaseOffset, DL); + PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff); + MemOpChains.push_back( + DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo())); + } + + // Emit all stores, make sure they occur before the call. + if (!MemOpChains.empty()) + Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); + + // Build a sequence of CopyToReg nodes glued together with token chain and + // glue operands which copy the outgoing args into registers. The InGlue is + // necessary since all emitted instructions must be stuck together in order + // to pass the live physical registers. + SDValue InGlue; + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first, + RegsToPass[i].second, InGlue); + InGlue = Chain.getValue(1); + } + + // Build the operands for the call instruction itself. + SmallVector Ops; + Ops.push_back(Chain); + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) + Ops.push_back(DAG.getRegister(RegsToPass[i].first, + RegsToPass[i].second.getValueType())); + + // Add a register mask operand representing the call-preserved registers. + const VERegisterInfo *TRI = Subtarget->getRegisterInfo(); + const uint32_t *Mask = + TRI->getCallPreservedMask(DAG.getMachineFunction(), CLI.CallConv); + assert(Mask && "Missing call preserved mask for calling convention"); + Ops.push_back(DAG.getRegisterMask(Mask)); + + // Make sure the CopyToReg nodes are glued to the call instruction which + // consumes the registers. + if (InGlue.getNode()) + Ops.push_back(InGlue); + + // Now the call itself. + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + Chain = DAG.getNode(VEISD::CALL, DL, NodeTys, Ops); + InGlue = Chain.getValue(1); + + // Revert the stack pointer immediately after the call. + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true), + DAG.getIntPtrConstant(0, DL, true), InGlue, DL); + InGlue = Chain.getValue(1); + + // Now extract the return values. This is more or less the same as + // LowerFormalArguments. + + // Assign locations to each value returned by this call. + SmallVector RVLocs; + CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); + + // Set inreg flag manually for codegen generated library calls that + // return float. + if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS) + CLI.Ins[0].Flags.setInReg(); + + RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_VE); + + // Copy all of the result registers out of their specified physreg. 
+ for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + unsigned Reg = VA.getLocReg(); + + // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can + // reside in the same register in the high and low bits. Reuse the + // CopyFromReg previous node to avoid duplicate copies. + SDValue RV; + if (RegisterSDNode *SrcReg = dyn_cast(Chain.getOperand(1))) + if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg) + RV = Chain.getValue(0); + + // But usually we'll create a new CopyFromReg for a different register. + if (!RV.getNode()) { + RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue); + Chain = RV.getValue(1); + InGlue = Chain.getValue(2); + } + + // Get the high bits for i32 struct elements. + if (VA.getValVT() == MVT::i32 && VA.needsCustom()) + RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV, + DAG.getConstant(32, DL, MVT::i32)); + + // The callee promoted the return value, so insert an Assert?ext SDNode so + // we won't promote the value again in this function. + switch (VA.getLocInfo()) { + case CCValAssign::SExt: + RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV, + DAG.getValueType(VA.getValVT())); + break; + case CCValAssign::ZExt: + RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV, + DAG.getValueType(VA.getValVT())); + break; + default: + break; + } + + // Truncate the register down to the return value type. + if (VA.isExtInLoc()) + RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV); + + InVals.push_back(RV); + } + + return Chain; +} + +//===----------------------------------------------------------------------===// +// TargetLowering Implementation +//===----------------------------------------------------------------------===// + +/// isFPImmLegal - Returns true if the target can instruction select the +/// specified FP immediate natively. If false, the legalizer will +/// materialize the FP immediate as a load from a constant pool. +bool VETargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, + bool ForCodeSize) const { + return VT == MVT::f32 || VT == MVT::f64; +} + +/// Determine if the target supports unaligned memory accesses. +/// +/// This function returns true if the target allows unaligned memory accesses +/// of the specified type in the given address space. If true, it also returns +/// whether the unaligned memory access is "fast" in the last argument by +/// reference. This is used, for example, in situations where an array +/// copy/move/set is converted to a sequence of store operations. Its use +/// helps to ensure that such replacements don't generate code that causes an +/// alignment error (trap) on the target machine. +bool VETargetLowering::allowsMisalignedMemoryAccesses(EVT VT, + unsigned AddrSpace, + unsigned Align, + MachineMemOperand::Flags, + bool *Fast) const { + // VE requires aligned accesses for vector accesses + if (VT.isVector()) + return false; + + if (Fast) { + // It's fast anytime on VE + *Fast = true; + } + return true; +} + +bool VETargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT, + const SelectionDAG &DAG) const { + // VE's vectorization is experimental, so disable to use vector stores + // if vectorize feature is disabled. + if (!Subtarget->vectorize()) { + if (MemVT.isVector()) { + return false; + } + } + + // Do not merge to float value size (128 bytes) if no implicit + // float attribute is set. 
+ bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute( + Attribute::NoImplicitFloat); + + if (NoFloat) { + unsigned MaxIntSize = 64; + return (MemVT.getSizeInBits() <= MaxIntSize); + } + return true; +} + +TargetLowering::AtomicExpansionKind +VETargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { + if (AI->getOperation() == AtomicRMWInst::Xchg) { + const DataLayout &DL = AI->getModule()->getDataLayout(); + if (DL.getTypeStoreSize(AI->getValOperand()->getType()) < + (VETargetLowering::getMinCmpXchgSizeInBits() / 8)) + return AtomicExpansionKind::CmpXChg; // Uses cas instruction for 1byte or + // 2byte atomic_swap + return AtomicExpansionKind::None; // Uses ts1am instruction + } + return AtomicExpansionKind::CmpXChg; +} + +VETargetLowering::VETargetLowering(const TargetMachine &TM, + const VESubtarget &STI) + : TargetLowering(TM), Subtarget(&STI) { + MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0)); + + // Instructions which use registers as conditionals examine all the + // bits (as does the pseudo SELECT_CC expansion). I don't think it + // matters much whether it's ZeroOrOneBooleanContent, or + // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the + // former. + setBooleanContents(ZeroOrOneBooleanContent); + setBooleanVectorContents(ZeroOrOneBooleanContent); + + // Set up the register classes. + addRegisterClass(MVT::i32, &VE::I32RegClass); + addRegisterClass(MVT::i64, &VE::I64RegClass); + addRegisterClass(MVT::f32, &VE::F32RegClass); + addRegisterClass(MVT::f64, &VE::I64RegClass); + addRegisterClass(MVT::f128, &VE::F128RegClass); + addRegisterClass(MVT::v512i32, &VE::V64RegClass); + addRegisterClass(MVT::v512f32, &VE::V64RegClass); + addRegisterClass(MVT::v256i32, &VE::V64RegClass); + addRegisterClass(MVT::v256i64, &VE::V64RegClass); + addRegisterClass(MVT::v256f32, &VE::V64RegClass); + addRegisterClass(MVT::v256f64, &VE::V64RegClass); + addRegisterClass(MVT::v128i32, &VE::V64RegClass); + addRegisterClass(MVT::v128i64, &VE::V64RegClass); + addRegisterClass(MVT::v128f32, &VE::V64RegClass); + addRegisterClass(MVT::v128f64, &VE::V64RegClass); + addRegisterClass(MVT::v64i32, &VE::V64RegClass); + addRegisterClass(MVT::v64i64, &VE::V64RegClass); + addRegisterClass(MVT::v64f32, &VE::V64RegClass); + addRegisterClass(MVT::v64f64, &VE::V64RegClass); + addRegisterClass(MVT::v32i32, &VE::V64RegClass); + addRegisterClass(MVT::v32i64, &VE::V64RegClass); + addRegisterClass(MVT::v32f32, &VE::V64RegClass); + addRegisterClass(MVT::v32f64, &VE::V64RegClass); + addRegisterClass(MVT::v16i32, &VE::V64RegClass); + addRegisterClass(MVT::v16i64, &VE::V64RegClass); + addRegisterClass(MVT::v16f32, &VE::V64RegClass); + addRegisterClass(MVT::v16f64, &VE::V64RegClass); + addRegisterClass(MVT::v8i32, &VE::V64RegClass); + addRegisterClass(MVT::v8i64, &VE::V64RegClass); + addRegisterClass(MVT::v8f32, &VE::V64RegClass); + addRegisterClass(MVT::v8f64, &VE::V64RegClass); + addRegisterClass(MVT::v4i32, &VE::V64RegClass); + addRegisterClass(MVT::v4i64, &VE::V64RegClass); + addRegisterClass(MVT::v4f32, &VE::V64RegClass); + addRegisterClass(MVT::v4f64, &VE::V64RegClass); + addRegisterClass(MVT::v2i32, &VE::V64RegClass); + addRegisterClass(MVT::v2i64, &VE::V64RegClass); + addRegisterClass(MVT::v2f32, &VE::V64RegClass); + addRegisterClass(MVT::v2f64, &VE::V64RegClass); + addRegisterClass(MVT::v256i1, &VE::VMRegClass); + addRegisterClass(MVT::v512i1, &VE::VM512RegClass); + + if (Subtarget->vectorize()) { + // We want to use any of vectorization oppotunities in llvm. 
+ // So, try to use llvm's SIMD style vectorizations here. + // + // However, this requires intrinsics with vector mask to use + // following bitcast in order to convert between v4i64/v8i64 and + // v256i1/v512i1 respectively since C doesn't have 1 bit data types. + // + // e.g. (i256i1 (bitcast (v4i64 (llvm.ve.vfmkw.mcv ...)))) + // ^^^^^^^^^^^^^^^ this bitcast is needed + // + addRegisterClass(MVT::v4i64, &VE::V64RegClass); + addRegisterClass(MVT::v8i64, &VE::V64RegClass); + } else { + // FIXME: + // llvm-ve uses v4i64/v8i64 for a mask temporally until llvm supports + // v256i1/v512i1. + addRegisterClass(MVT::v4i64, &VE::VMRegClass); + addRegisterClass(MVT::v8i64, &VE::VM512RegClass); + } + + // Turn FP extload into load/fpextend + for (MVT VT : MVT::fp_valuetypes()) { + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand); + } + + // VE doesn't have i1 sign extending load + for (MVT VT : MVT::integer_valuetypes()) { + setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); + setTruncStoreAction(VT, MVT::i1, Expand); + } + + // Turn FP truncstore into trunc + store. + setTruncStoreAction(MVT::f64, MVT::f32, Expand); + setTruncStoreAction(MVT::f128, MVT::f32, Expand); + setTruncStoreAction(MVT::f128, MVT::f64, Expand); + + // Custom legalize GlobalAddress nodes into LO/HI parts. + setOperationAction(ISD::GlobalAddress, PtrVT, Custom); + setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); + setOperationAction(ISD::ConstantPool, PtrVT, Custom); + setOperationAction(ISD::BlockAddress, PtrVT, Custom); + + // VE has no REM or DIVREM operations. + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::UREM, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::SDIVREM, VT, Expand); + setOperationAction(ISD::UDIVREM, VT, Expand); + } + + // VE has instructions for fp<->sint, so use them. + + // VE doesn't have instructions for fp<->uint, so expand them by llvm + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote); // use i64 + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote); // use i64 + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + + // VE doesn't have BRCOND + setOperationAction(ISD::BRCOND, MVT::Other, Expand); + + // BRIND/BR_JT are not implemented yet. + // FIXME: BRIND instruction is implemented, but JumpTable is not yet. + setOperationAction(ISD::BRIND, MVT::Other, Expand); + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + + setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); + setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); + setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); + if (TM.Options.ExceptionModel == ExceptionHandling::SjLj) + setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); + + setTargetDAGCombine(ISD::FADD); + // setTargetDAGCombine(ISD::FMA); + + // ATOMICs. + // Atomics are supported on VE. + setMaxAtomicSizeInBitsSupported(64); + setMinCmpXchgSizeInBits(32); + setSupportsUnalignedAtomics(false); + + // Use custom inserter, LowerATOMIC_FENCE, for ATOMIC_FENCE. + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + + for (MVT VT : MVT::integer_valuetypes()) { + // Several atomic operations are converted to VE instructions well. 
+    // Additional memory fences are generated in emitLeadingFence and
+    // emitTrailingFence functions.
+    setOperationAction(ISD::ATOMIC_LOAD, VT, Legal);
+    setOperationAction(ISD::ATOMIC_STORE, VT, Legal);
+    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Legal);
+    setOperationAction(ISD::ATOMIC_SWAP, VT, Legal);
+
+    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
+
+    // FIXME: "atmam" instructions are not supported yet
+    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Expand);
+
+    // VE doesn't have the following instructions
+    setOperationAction(ISD::ATOMIC_LOAD_CLR, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
+  }
+
+  // FIXME: VE's I128 stuff is not investigated yet
+  if (!1) {
+    // These libcalls are not available in 32-bit.
+    setLibcallName(RTLIB::SHL_I128, nullptr);
+    setLibcallName(RTLIB::SRL_I128, nullptr);
+    setLibcallName(RTLIB::SRA_I128, nullptr);
+  }
+
+  for (MVT VT : MVT::fp_valuetypes()) {
+    // VE has no scalar FMA instruction
+    setOperationAction(ISD::FMA, VT, Expand);
+    setOperationAction(ISD::FMAD, VT, Expand);
+    setOperationAction(ISD::FREM, VT, Expand);
+    setOperationAction(ISD::FNEG, VT, Expand);
+    setOperationAction(ISD::FABS, VT, Expand);
+    setOperationAction(ISD::FSQRT, VT, Expand);
+    setOperationAction(ISD::FSIN, VT, Expand);
+    setOperationAction(ISD::FCOS, VT, Expand);
+    setOperationAction(ISD::FPOWI, VT, Expand);
+    setOperationAction(ISD::FPOW, VT, Expand);
+    setOperationAction(ISD::FLOG, VT, Expand);
+    setOperationAction(ISD::FLOG2, VT, Expand);
+    setOperationAction(ISD::FLOG10, VT, Expand);
+    setOperationAction(ISD::FEXP, VT, Expand);
+    setOperationAction(ISD::FEXP2, VT, Expand);
+    setOperationAction(ISD::FCEIL, VT, Expand);
+    setOperationAction(ISD::FTRUNC, VT, Expand);
+    setOperationAction(ISD::FRINT, VT, Expand);
+    setOperationAction(ISD::FNEARBYINT, VT, Expand);
+    setOperationAction(ISD::FROUND, VT, Expand);
+    setOperationAction(ISD::FFLOOR, VT, Expand);
+    setOperationAction(ISD::FMINNUM, VT, Expand);
+    setOperationAction(ISD::FMAXNUM, VT, Expand);
+    setOperationAction(ISD::FMINIMUM, VT, Expand);
+    setOperationAction(ISD::FMAXIMUM, VT, Expand);
+    setOperationAction(ISD::FSINCOS, VT, Expand);
+  }
+
+  // FIXME: VE's FCOPYSIGN is not investigated yet
+  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
+  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+
+  // FIXME: VE's SHL_PARTS and others are not investigated yet.
+  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+  if (1) {
+    setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
+    setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
+    setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
+  }
+
+  // Expands to [SU]MUL_LOHI.
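The `[SU]MUL_LOHI` note above refers to the generic double-width multiply nodes that the `MULHU`/`MULHS` actions on the following lines expand into. As a plain scalar illustration of what those operations compute for i32 (a sketch only, not VE code):

```cpp
#include <cstdint>

// MULHS: the signed high 32 bits of the 64-bit product of two i32 values.
int32_t mulhs32(int32_t a, int32_t b) {
  return static_cast<int32_t>((static_cast<int64_t>(a) * b) >> 32);
}

// MULHU: the unsigned high 32 bits of the same product.
uint32_t mulhu32(uint32_t a, uint32_t b) {
  return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
}
```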
+ setOperationAction(ISD::MULHU, MVT::i32, Expand); + setOperationAction(ISD::MULHS, MVT::i32, Expand); + // setOperationAction(ISD::MUL, MVT::i32, Expand); + + if (1) { + setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); + setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); + setOperationAction(ISD::MULHU, MVT::i64, Expand); + setOperationAction(ISD::MULHS, MVT::i64, Expand); + + setOperationAction(ISD::UMULO, MVT::i64, Custom); + setOperationAction(ISD::SMULO, MVT::i64, Custom); + } + + // FIXME: temporary disabling Custom BITCAST since such BITCAST + // is generated by only LowerBUILD_VECTOR temporary disabled. +#if 0 + // Bits operations + setOperationAction(ISD::BITCAST, MVT::v256i64, Custom); +#endif + + setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); + setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); + setOperationAction(ISD::BSWAP, MVT::i32, Legal); + setOperationAction(ISD::BSWAP, MVT::i64, Legal); + setOperationAction(ISD::CTPOP, MVT::i32, Legal); + setOperationAction(ISD::CTPOP, MVT::i64, Legal); + setOperationAction(ISD::CTLZ, MVT::i32, Legal); + setOperationAction(ISD::CTLZ, MVT::i64, Legal); + setOperationAction(ISD::CTTZ, MVT::i32, Expand); + setOperationAction(ISD::CTTZ, MVT::i64, Expand); + setOperationAction(ISD::ROTL, MVT::i32, Expand); + setOperationAction(ISD::ROTL, MVT::i64, Expand); + setOperationAction(ISD::ROTR, MVT::i32, Expand); + setOperationAction(ISD::ROTR, MVT::i64, Expand); + + // VASTART needs to be custom lowered to use the VarArgsFrameIndex. + setOperationAction(ISD::VASTART, MVT::Other, Custom); + // VAARG needs to be lowered to access with 8 bytes alignment. + setOperationAction(ISD::VAARG, MVT::Other, Custom); + + // Use the default implementation. + setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + + // Expand DYNAMIC_STACKALLOC + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); + + // LOAD/STORE for f128 needs to be custom lowered to expand two loads/stores + setOperationAction(ISD::LOAD, MVT::f128, Custom); + setOperationAction(ISD::STORE, MVT::f128, Custom); + + for (MVT VT : MVT::vector_valuetypes()) { + if (VT.getVectorElementType() == MVT::i1 || + VT.getVectorElementType() == MVT::i8 || + VT.getVectorElementType() == MVT::i16) { + // VE uses vXi1 types but has no generic operations. + // VE doesn't support vXi8 and vXi16 value types. + // So, we mark them all as expanded. + + // Expand all vector-i8/i16-vector truncstore and extload + for (MVT OuterVT : MVT::vector_valuetypes()) { + setTruncStoreAction(OuterVT, VT, Expand); + setLoadExtAction(ISD::SEXTLOAD, OuterVT, VT, Expand); + setLoadExtAction(ISD::ZEXTLOAD, OuterVT, VT, Expand); + setLoadExtAction(ISD::EXTLOAD, OuterVT, VT, Expand); + } + // SExt i1 and ZExt i1 are legal. 
+ if (VT.getVectorElementType() == MVT::i1) { + setOperationAction(ISD::SIGN_EXTEND, VT, Legal); + setOperationAction(ISD::ZERO_EXTEND, VT, Legal); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + } else { + setOperationAction(ISD::SIGN_EXTEND, VT, Expand); + setOperationAction(ISD::ZERO_EXTEND, VT, Expand); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); + } + + // STORE for vXi1 needs to be custom lowered to expand multiple + // instructions. + if (VT.getVectorElementType() == MVT::i1) + setOperationAction(ISD::STORE, VT, Custom); + + setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); + setOperationAction(ISD::BUILD_VECTOR, VT, Expand); + setOperationAction(ISD::CONCAT_VECTORS, VT, Expand); + setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand); + setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); + + setOperationAction(ISD::FP_EXTEND, VT, Expand); + setOperationAction(ISD::FP_ROUND, VT, Expand); + + setOperationAction(ISD::FABS, VT, Expand); + setOperationAction(ISD::FNEG, VT, Expand); + setOperationAction(ISD::FADD, VT, Expand); + setOperationAction(ISD::FSUB, VT, Expand); + setOperationAction(ISD::FMUL, VT, Expand); + setOperationAction(ISD::FDIV, VT, Expand); + setOperationAction(ISD::ADD, VT, Expand); + setOperationAction(ISD::SUB, VT, Expand); + setOperationAction(ISD::MUL, VT, Expand); + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + + setOperationAction(ISD::SHL, VT, Expand); + + setOperationAction(ISD::MSCATTER, VT, Expand); + setOperationAction(ISD::MGATHER, VT, Expand); + setOperationAction(ISD::MLOAD, VT, Expand); + + // VE vector unit supports only setcc and vselect + setOperationAction(ISD::SELECT_CC, VT, Expand); + + // VE doesn't have instructions for fp<->uint, so expand them by llvm + setOperationAction(ISD::FP_TO_UINT, VT, Promote); // use i64 + setOperationAction(ISD::UINT_TO_FP, VT, Promote); // use i64 + } else { + setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal); + setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); + setOperationAction(ISD::BUILD_VECTOR, VT, Custom); + setOperationAction(ISD::CONCAT_VECTORS, VT, Expand); + setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand); + setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); + + setOperationAction(ISD::FP_EXTEND, VT, Legal); + setOperationAction(ISD::FP_ROUND, VT, Legal); + + // currently unsupported math functions + setOperationAction(ISD::FABS, VT, Expand); + + // supported calculations + setOperationAction(ISD::FNEG, VT, Legal); + setOperationAction(ISD::FADD, VT, Legal); + setOperationAction(ISD::FSUB, VT, Legal); + setOperationAction(ISD::FMUL, VT, Legal); + setOperationAction(ISD::FDIV, VT, Legal); + setOperationAction(ISD::ADD, VT, Legal); + setOperationAction(ISD::SUB, VT, Legal); + setOperationAction(ISD::MUL, VT, Legal); + setOperationAction(ISD::SDIV, VT, Legal); + setOperationAction(ISD::UDIV, VT, Legal); + + setOperationAction(ISD::SHL, VT, Legal); + + setOperationAction(ISD::MSCATTER, VT, Custom); + setOperationAction(ISD::MGATHER, VT, Custom); + setOperationAction(ISD::MLOAD, VT, Custom); + + // VE vector unit supports only setcc and vselect + setOperationAction(ISD::SELECT_CC, VT, Expand); + + // VE doesn't have instructions 
for fp<->uint, so expand them by llvm + if (VT.getVectorElementType() == MVT::i32) { + setOperationAction(ISD::FP_TO_UINT, VT, Promote); // use i64 + setOperationAction(ISD::UINT_TO_FP, VT, Promote); // use i64 + } else { + setOperationAction(ISD::FP_TO_UINT, VT, Expand); + setOperationAction(ISD::UINT_TO_FP, VT, Expand); + } + } + } + + // VE has no packed MUL, SDIV, or UDIV operations. + for (MVT VT : {MVT::v512i32, MVT::v512f32}) { + setOperationAction(ISD::MUL, VT, Expand); + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + } + + // VE has no REM or DIVREM operations. + for (MVT VT : MVT::vector_valuetypes()) { + setOperationAction(ISD::UREM, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::SDIVREM, VT, Expand); + setOperationAction(ISD::UDIVREM, VT, Expand); + } + + // VE has FAQ, FSQ, FMQ, and FCQ + setOperationAction(ISD::FADD, MVT::f128, Legal); + setOperationAction(ISD::FSUB, MVT::f128, Legal); + setOperationAction(ISD::FMUL, MVT::f128, Legal); + setOperationAction(ISD::FDIV, MVT::f128, Expand); + setOperationAction(ISD::FSQRT, MVT::f128, Expand); + setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal); + setOperationAction(ISD::FP_ROUND, MVT::f128, Legal); + + // Other configurations related to f128. + setOperationAction(ISD::SELECT, MVT::f128, Legal); + setOperationAction(ISD::SELECT_CC, MVT::f128, Legal); + setOperationAction(ISD::SETCC, MVT::f128, Legal); + setOperationAction(ISD::BR_CC, MVT::f128, Legal); + + setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); + setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); + setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); + + // TRAP to expand (which turns it into abort). + setOperationAction(ISD::TRAP, MVT::Other, Expand); + + // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand" + // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP. 
+ setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand); + + // vector fma // TESTING + for (MVT VT : MVT::vector_valuetypes()) { + setOperationAction(ISD::FMA, VT, Legal); + // setOperationAction(ISD::FMAD, VT, Legal); + setOperationAction(ISD::FMAD, VT, Expand); + setOperationAction(ISD::FREM, VT, Expand); + setOperationAction(ISD::FNEG, VT, Expand); + setOperationAction(ISD::FABS, VT, Expand); + setOperationAction(ISD::FSQRT, VT, Expand); + setOperationAction(ISD::FSIN, VT, Expand); + setOperationAction(ISD::FCOS, VT, Expand); + setOperationAction(ISD::FPOWI, VT, Expand); + setOperationAction(ISD::FPOW, VT, Expand); + setOperationAction(ISD::FLOG, VT, Expand); + setOperationAction(ISD::FLOG2, VT, Expand); + setOperationAction(ISD::FLOG10, VT, Expand); + setOperationAction(ISD::FEXP, VT, Expand); + setOperationAction(ISD::FEXP2, VT, Expand); + setOperationAction(ISD::FCEIL, VT, Expand); + setOperationAction(ISD::FTRUNC, VT, Expand); + setOperationAction(ISD::FRINT, VT, Expand); + setOperationAction(ISD::FNEARBYINT, VT, Expand); + setOperationAction(ISD::FROUND, VT, Expand); + setOperationAction(ISD::FFLOOR, VT, Expand); + setOperationAction(ISD::FMINNUM, VT, Expand); + setOperationAction(ISD::FMAXNUM, VT, Expand); + setOperationAction(ISD::FMINIMUM, VT, Expand); + setOperationAction(ISD::FMAXIMUM, VT, Expand); + setOperationAction(ISD::FSINCOS, VT, Expand); + } + + setStackPointerRegisterToSaveRestore(VE::SX11); + + // Set function alignment to 16 bytes + setMinFunctionAlignment(Align(16)); + + // VE stores all argument by 8 bytes alignment + setMinStackArgumentAlignment(Align(8)); + + computeRegisterProperties(Subtarget->getRegisterInfo()); +} + +const char *VETargetLowering::getTargetNodeName(unsigned Opcode) const { + switch ((VEISD::NodeType)Opcode) { + case VEISD::FIRST_NUMBER: + break; + case VEISD::CMPICC: + return "VEISD::CMPICC"; + case VEISD::CMPFCC: + return "VEISD::CMPFCC"; + case VEISD::BRICC: + return "VEISD::BRICC"; + case VEISD::BRXCC: + return "VEISD::BRXCC"; + case VEISD::BRFCC: + return "VEISD::BRFCC"; + case VEISD::SELECT: + return "VEISD::SELECT"; + case VEISD::SELECT_ICC: + return "VEISD::SELECT_ICC"; + case VEISD::SELECT_XCC: + return "VEISD::SELECT_XCC"; + case VEISD::SELECT_FCC: + return "VEISD::SELECT_FCC"; + case VEISD::EH_SJLJ_SETJMP: + return "VEISD::EH_SJLJ_SETJMP"; + case VEISD::EH_SJLJ_LONGJMP: + return "VEISD::EH_SJLJ_LONGJMP"; + case VEISD::EH_SJLJ_SETUP_DISPATCH: + return "VEISD::EH_SJLJ_SETUP_DISPATCH"; + case VEISD::Hi: + return "VEISD::Hi"; + case VEISD::Lo: + return "VEISD::Lo"; + case VEISD::FTOI: + return "VEISD::FTOI"; + case VEISD::ITOF: + return "VEISD::ITOF"; + case VEISD::FTOX: + return "VEISD::FTOX"; + case VEISD::XTOF: + return "VEISD::XTOF"; + case VEISD::MAX: + return "VEISD::MAX"; + case VEISD::MIN: + return "VEISD::MIN"; + case VEISD::FMAX: + return "VEISD::FMAX"; + case VEISD::FMIN: + return "VEISD::FMIN"; + case VEISD::GETFUNPLT: + return "VEISD::GETFUNPLT"; + case VEISD::GETSTACKTOP: + return "VEISD::GETSTACKTOP"; + case VEISD::GETTLSADDR: + return "VEISD::GETTLSADDR"; + case VEISD::MEMBARRIER: + return "VEISD::MEMBARRIER"; + case VEISD::CALL: + return "VEISD::CALL"; + case VEISD::RET_FLAG: + return "VEISD::RET_FLAG"; + case VEISD::GLOBAL_BASE_REG: + return "VEISD::GLOBAL_BASE_REG"; + case VEISD::FLUSHW: + return "VEISD::FLUSHW"; + case VEISD::VEC_BROADCAST: + return "VEISD::VEC_BROADCAST"; + case VEISD::VEC_LVL: + return "VEISD::VEC_LVL"; + case VEISD::VEC_SEQ: + return "VEISD::VEC_SEQ"; + case VEISD::VEC_VMV: + return 
"VEISD::VEC_VMV"; + case VEISD::VEC_SCATTER: + return "VEISD::VEC_SCATTER"; + case VEISD::VEC_GATHER: + return "VEISD::VEC_GATHER"; + case VEISD::Wrapper: + return "VEISD::Wrapper"; + } + return nullptr; +} + +EVT VETargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &, + EVT VT) const { + if (!VT.isVector()) + return MVT::i32; + return VT.changeVectorElementTypeToInteger(); +} + +/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to +/// be zero. Op is expected to be a target specific node. Used by DAG +/// combiner. +void VETargetLowering::computeKnownBitsForTargetNode(const SDValue Op, + KnownBits &Known, + const APInt &DemandedElts, + const SelectionDAG &DAG, + unsigned Depth) const { + KnownBits Known2; + Known.resetAll(); + + switch (Op.getOpcode()) { + default: + break; + case VEISD::SELECT_ICC: + case VEISD::SELECT_XCC: + case VEISD::SELECT_FCC: + Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1); + Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); + + // Only known if known in both the LHS and RHS. + Known.One &= Known2.One; + Known.Zero &= Known2.Zero; + break; + } +} + +// Convert to a target node and set target flags. +SDValue VETargetLowering::withTargetFlags(SDValue Op, unsigned TF, + SelectionDAG &DAG) const { + if (const GlobalAddressSDNode *GA = dyn_cast(Op)) + return DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(GA), + GA->getValueType(0), GA->getOffset(), TF); + + if (const ConstantPoolSDNode *CP = dyn_cast(Op)) + return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0), + CP->getAlignment(), CP->getOffset(), TF); + + if (const BlockAddressSDNode *BA = dyn_cast(Op)) + return DAG.getTargetBlockAddress(BA->getBlockAddress(), Op.getValueType(), + 0, TF); + + if (const ExternalSymbolSDNode *ES = dyn_cast(Op)) + return DAG.getTargetExternalSymbol(ES->getSymbol(), ES->getValueType(0), + TF); + + llvm_unreachable("Unhandled address SDNode"); +} + +// Split Op into high and low parts according to HiTF and LoTF. +// Return an ADD node combining the parts. +SDValue VETargetLowering::makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF, + SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT VT = Op.getValueType(); + SDValue Hi = DAG.getNode(VEISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG)); + SDValue Lo = DAG.getNode(VEISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG)); + return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo); +} + +// Build SDNodes for producing an address from a GlobalAddress, ConstantPool, +// or ExternalSymbol SDNode. +SDValue VETargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT VT = getPointerTy(DAG.getDataLayout()); + + // Handle PIC mode first. SPARC needs a got load for every variable! + if (isPositionIndependent()) { + // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this + // function has calls. + MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); + MFI.setHasCalls(true); + + if (dyn_cast(Op) != nullptr || + (dyn_cast(Op) != nullptr && + dyn_cast(Op)->getGlobal()->hasLocalLinkage())) { + // Create following instructions for local linkage PIC code. 
+ // lea %s35, %gotoff_lo(.LCPI0_0) + // and %s35, %s35, (32)0 + // lea.sl %s35, %gotoff_hi(.LCPI0_0)(%s35) + // adds.l %s35, %s15, %s35 ; %s15 is GOT + // FIXME: use lea.sl %s35, %gotoff_hi(.LCPI0_0)(%s35, %s15) + SDValue HiLo = makeHiLoPair(Op, VEMCExpr::VK_VE_GOTOFF_HI32, + VEMCExpr::VK_VE_GOTOFF_LO32, DAG); + SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, VT); + return DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); + } else { + // Create following instructions for not local linkage PIC code. + // lea %s35, %got_lo(.LCPI0_0) + // and %s35, %s35, (32)0 + // lea.sl %s35, %got_hi(.LCPI0_0)(%s35) + // adds.l %s35, %s15, %s35 ; %s15 is GOT + // ld %s35, (,%s35) + // FIXME: use lea.sl %s35, %gotoff_hi(.LCPI0_0)(%s35, %s15) + SDValue HiLo = makeHiLoPair(Op, VEMCExpr::VK_VE_GOT_HI32, + VEMCExpr::VK_VE_GOT_LO32, DAG); + SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, DL, VT); + SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo); + return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr, + MachinePointerInfo::getGOT(DAG.getMachineFunction())); + } + } + + // This is one of the absolute code models. + switch (getTargetMachine().getCodeModel()) { + default: + llvm_unreachable("Unsupported absolute code model"); + case CodeModel::Small: + case CodeModel::Medium: + case CodeModel::Large: + // abs64. + return makeHiLoPair(Op, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, DAG); + } +} + +SDValue VETargetLowering::LowerGlobalAddress(SDValue Op, + SelectionDAG &DAG) const { + return makeAddress(Op, DAG); +} + +SDValue VETargetLowering::LowerConstantPool(SDValue Op, + SelectionDAG &DAG) const { + return makeAddress(Op, DAG); +} + +SDValue VETargetLowering::LowerBlockAddress(SDValue Op, + SelectionDAG &DAG) const { + return makeAddress(Op, DAG); +} + +SDValue +VETargetLowering::LowerToTLSGeneralDynamicModel(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + + // Generate following code: + // t1: ch,glue = callseq_start t0, 0, 0 + // t2: i64,ch,glue = VEISD::GETTLSADDR t1, label, t1:1 + // t3: ch,glue = callseq_end t2, 0, 0, t2:2 + // t4: i64,ch,glue = CopyFromReg t3, Register:i64 $sx0, t3:1 + SDValue Label = withTargetFlags(Op, 0, DAG); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + + // Lowering the machine isd will make sure everything is in the right + // location. + SDValue Chain = DAG.getEntryNode(); + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask( + DAG.getMachineFunction(), CallingConv::C); + Chain = DAG.getCALLSEQ_START(Chain, 64, 0, dl); + SDValue Args[] = {Chain, Label, DAG.getRegisterMask(Mask), Chain.getValue(1)}; + Chain = DAG.getNode(VEISD::GETTLSADDR, dl, NodeTys, Args); + Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(64, dl, true), + DAG.getIntPtrConstant(0, dl, true), + Chain.getValue(1), dl); + Chain = DAG.getCopyFromReg(Chain, dl, VE::SX0, PtrVT, Chain.getValue(1)); + + // GETTLSADDR will be codegen'ed as call. Inform MFI that function has calls. + MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); + MFI.setHasCalls(true); + + // Also generate code to prepare a GOT register if it is PIC. 
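Before the PIC-specific GOT set-up that follows, it may help to see what kind of source reaches this lowering at all: any access to `thread_local` data in position-independent code goes through the general-dynamic sequence built above. A minimal usage example (illustrative only):

```cpp
// Compiled with -fPIC, both the definition and the access below are lowered
// through the general-dynamic TLS call sequence constructed in this function.
thread_local int Counter = 0;

int nextId() { return ++Counter; }
```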
+ if (isPositionIndependent()) { + MachineFunction &MF = DAG.getMachineFunction(); + Subtarget->getInstrInfo()->getGlobalBaseReg(&MF); + } + + return Chain; +} + +SDValue VETargetLowering::LowerToTLSLocalExecModel(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + + // Generate following code: + // lea %s0, Op@tpoff_lo + // and %s0, %s0, (32)0 + // lea.sl %s0, Op@tpoff_hi(%s0) + // add %s0, %s0, %tp + // FIXME: use lea.sl %s0, Op@tpoff_hi(%tp, %s0) for better performance + SDValue HiLo = makeHiLoPair(Op, VEMCExpr::VK_VE_TPOFF_HI32, + VEMCExpr::VK_VE_TPOFF_LO32, DAG); + return DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(VE::SX14, PtrVT), + HiLo); +} + +SDValue VETargetLowering::LowerGlobalTLSAddress(SDValue Op, + SelectionDAG &DAG) const { + // Current implementation of nld doesn't allow local exec model code + // described in VE-tls_v1.1.pdf (*1) as its input. The nld accept + // only general dynamic model and optimize it whenever. So, here + // we need to generate only general dynamic model code sequence. + // + // *1: https://www.nec.com/en/global/prod/hpc/aurora/document/VE-tls_v1.1.pdf + return LowerToTLSGeneralDynamicModel(Op, DAG); +} + +SDValue VETargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + return DAG.getNode(VEISD::EH_SJLJ_SETJMP, dl, + DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), + Op.getOperand(1)); +} + +SDValue VETargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + return DAG.getNode(VEISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), + Op.getOperand(1)); +} + +SDValue VETargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + return DAG.getNode(VEISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, + Op.getOperand(0)); +} + +static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, + const VETargetLowering &TLI) { + MachineFunction &MF = DAG.getMachineFunction(); + VEMachineFunctionInfo *FuncInfo = MF.getInfo(); + auto PtrVT = TLI.getPointerTy(DAG.getDataLayout()); + + // Need frame address to find the address of VarArgsFrameIndex. + MF.getFrameInfo().setFrameAddressIsTaken(true); + + // vastart just stores the address of the VarArgsFrameIndex slot into the + // memory location argument. + SDLoc DL(Op); + SDValue Offset = + DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(VE::SX9, PtrVT), + DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL)); + const Value *SV = cast(Op.getOperand(2))->getValue(); + return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1), + MachinePointerInfo(SV)); +} + +static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { + SDNode *Node = Op.getNode(); + EVT VT = Node->getValueType(0); + SDValue InChain = Node->getOperand(0); + SDValue VAListPtr = Node->getOperand(1); + EVT PtrVT = VAListPtr.getValueType(); + const Value *SV = cast(Node->getOperand(2))->getValue(); + SDLoc DL(Node); + SDValue VAList = + DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV)); + SDValue Chain = VAList.getValue(1); + SDValue NextPtr; + + if (VT == MVT::f128) { + // Alignment + int Align = 16; + VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, + DAG.getConstant(Align - 1, DL, PtrVT)); + VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList, + DAG.getConstant(-Align, DL, PtrVT)); + // Increment the pointer, VAList, by 16 to the next vaarg. 
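The pointer bumping done here (16 bytes for f128 on the lines that follow, 8 bytes otherwise, with f32 stored in the upper half of its slot) can be summarised as plain scalar arithmetic. This is a sketch of the layout only, not the DAG code:

```cpp
#include <cstdint>

// Scalar model of the va_arg pointer handling above and below: f128 is
// rounded up to 16-byte alignment and consumes 16 bytes, everything else
// consumes one 8-byte slot.
uint64_t nextVaArgPtr(uint64_t VAList, bool IsF128) {
  if (IsF128) {
    VAList = (VAList + 15) & ~uint64_t(15); // round up to 16
    return VAList + 16;
  }
  return VAList + 8;
}

// An f32 occupies bytes 4..7 of its 8-byte slot, so its address is adjusted
// by 4 before the final load.
uint64_t vaArgAddress(uint64_t AlignedVAList, bool IsF32) {
  return IsF32 ? AlignedVAList + 4 : AlignedVAList;
}
```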
+ NextPtr = + DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(16, DL)); + } else if (VT == MVT::f32) { + // float --> need special handling like below. + // 0 4 + // +------+------+ + // | empty| float| + // +------+------+ + // Increment the pointer, VAList, by 8 to the next vaarg. + NextPtr = + DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(8, DL)); + // Then, adjust VAList. + unsigned InternalOffset = 4; + VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList, + DAG.getConstant(InternalOffset, DL, PtrVT)); + } else { + // Increment the pointer, VAList, by 8 to the next vaarg. + NextPtr = + DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getIntPtrConstant(8, DL)); + } + + // Store the incremented VAList to the legalized pointer. + InChain = DAG.getStore(Chain, DL, NextPtr, VAListPtr, MachinePointerInfo(SV)); + + // Load the actual argument out of the pointer VAList. + // We can't count on greater alignment than the word size. + return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(), + std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8); +} + +SDValue VETargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, + SelectionDAG &DAG) const { + // Generate following code. + // (void)__llvm_grow_stack(size); + // ret = GETSTACKTOP; // pseudo instruction + SDLoc dl(Op); + + SDValue Size = Op.getOperand(1); // Legalize the size. + EVT VT = Size->getValueType(0); + + // Prepare arguments + TargetLowering::ArgListTy Args; + TargetLowering::ArgListEntry Entry; + Entry.Node = Size; + Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext()); + Args.push_back(Entry); + Type *RetTy = Type::getVoidTy(*DAG.getContext()); + + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + SDValue Callee = DAG.getTargetExternalSymbol("__llvm_grow_stack", PtrVT, 0); + + TargetLowering::CallLoweringInfo CLI(DAG); + CLI.setDebugLoc(dl) + .setChain(DAG.getEntryNode()) + .setCallee(CallingConv::VE_LLVM_GROW_STACK, RetTy, Callee, + std::move(Args)) + .setDiscardResult(true); + std::pair pair = LowerCallTo(CLI); + SDValue Chain = pair.second; + SDValue Value = DAG.getNode(VEISD::GETSTACKTOP, dl, VT, Chain); + SDValue Ops[2] = {Value, Chain}; + return DAG.getMergeValues(Ops, dl); +} + +static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, + const VETargetLowering &TLI, + const VESubtarget *Subtarget) { + SDLoc dl(Op); + unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + MFI.setFrameAddressIsTaken(true); + + EVT PtrVT = TLI.getPointerTy(MF.getDataLayout()); + + // Naked functions never have a frame pointer, and so we use r1. For all + // other functions, this decision must be delayed until during PEI. 
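The code that follows picks the frame register and then walks the chain of saved frame pointers once per requested level. In source terms this path is exercised by the GCC-style builtins; a minimal usage example (non-zero levels are not guaranteed to be meaningful on every target, so treat this as illustrative):

```cpp
// Depth 0 returns this function's own frame/return address; non-zero depths
// trigger the per-level loads performed by the loop below.
void *callerFrame() { return __builtin_frame_address(1); }
void *callerReturnAddress() { return __builtin_return_address(1); }
```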
+ const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo(); + unsigned FrameReg = RegInfo->getFrameRegister(MF); + + SDValue FrameAddr = + DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT); + while (Depth--) + FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), + FrameAddr, MachinePointerInfo()); + return FrameAddr; +} + +static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, + const VETargetLowering &TLI, + const VESubtarget *Subtarget) { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + MFI.setReturnAddressIsTaken(true); + + if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG)) + return SDValue(); + + SDLoc dl(Op); + unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); + + auto PtrVT = TLI.getPointerTy(MF.getDataLayout()); + + if (Depth > 0) { + SDValue FrameAddr = LowerFRAMEADDR(Op, DAG, TLI, Subtarget); + SDValue Offset = DAG.getConstant(8, dl, MVT::i64); + return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), + DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), + MachinePointerInfo()); + } + + // Just load the return address off the stack. + SDValue RetAddrFI = DAG.getFrameIndex(1, PtrVT); + return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, + MachinePointerInfo()); +} + +// Lower a f128 load into two f64 loads. +static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG) { + SDLoc dl(Op); + LoadSDNode *LdNode = dyn_cast(Op.getNode()); + assert(LdNode && LdNode->getOffset().isUndef() && "Unexpected node type"); + + SDValue BasePtr = LdNode->getBasePtr(); + if (dyn_cast(BasePtr.getNode())) { + // For the case of frame index, expanding it here cause dependency + // problem. So, treat it as a legal and expand it in eliminateFrameIndex + return Op; + } + + unsigned alignment = LdNode->getAlignment(); + if (alignment > 8) + alignment = 8; + + SDValue Lo64 = + DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(), + LdNode->getPointerInfo(), alignment, + LdNode->isVolatile() ? MachineMemOperand::MOVolatile + : MachineMemOperand::MONone); + EVT addrVT = LdNode->getBasePtr().getValueType(); + SDValue HiPtr = DAG.getNode(ISD::ADD, dl, addrVT, LdNode->getBasePtr(), + DAG.getConstant(8, dl, addrVT)); + SDValue Hi64 = + DAG.getLoad(MVT::f64, dl, LdNode->getChain(), HiPtr, + LdNode->getPointerInfo(), alignment, + LdNode->isVolatile() ? MachineMemOperand::MOVolatile + : MachineMemOperand::MONone); + + SDValue SubRegEven = DAG.getTargetConstant(VE::sub_even, dl, MVT::i32); + SDValue SubRegOdd = DAG.getTargetConstant(VE::sub_odd, dl, MVT::i32); + + // VE stores Hi64 to 8(addr) and Lo64 to 0(addr) + SDNode *InFP128 = + DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::f128); + InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, MVT::f128, + SDValue(InFP128, 0), Hi64, SubRegEven); + InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, MVT::f128, + SDValue(InFP128, 0), Lo64, SubRegOdd); + SDValue OutChains[2] = {SDValue(Lo64.getNode(), 1), + SDValue(Hi64.getNode(), 1)}; + SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); + SDValue Ops[2] = {SDValue(InFP128, 0), OutChain}; + return DAG.getMergeValues(Ops, dl); +} + +static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) { + LoadSDNode *LdNode = cast(Op.getNode()); + + EVT MemVT = LdNode->getMemoryVT(); + if (MemVT == MVT::f128) + return LowerF128Load(Op, DAG); + + return Op; +} + +// Lower a f128 store into two f64 stores. 
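Both the f128 load above and the f128 store defined next move the value as two 64-bit halves, with the low half at offset 0 and the high half at offset 8 of the same address. A byte-level model of that layout (a sketch under the layout the surrounding code assumes):

```cpp
#include <cstdint>
#include <cstring>

// Model of the in-memory f128 layout used by the load/store splitting:
// low 64 bits at addr+0, high 64 bits at addr+8.
struct F128Parts {
  uint64_t Lo;
  uint64_t Hi;
};

F128Parts loadF128(const unsigned char *Addr) {
  F128Parts P;
  std::memcpy(&P.Lo, Addr + 0, 8);
  std::memcpy(&P.Hi, Addr + 8, 8);
  return P;
}

void storeF128(unsigned char *Addr, const F128Parts &P) {
  std::memcpy(Addr + 0, &P.Lo, 8);
  std::memcpy(Addr + 8, &P.Hi, 8);
}
```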
+static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) { + SDLoc dl(Op); + StoreSDNode *StNode = dyn_cast(Op.getNode()); + assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type"); + + SDValue BasePtr = StNode->getBasePtr(); + if (dyn_cast(BasePtr.getNode())) { + // For the case of frame index, expanding it here cause dependency + // problem. So, treat it as a legal and expand it in eliminateFrameIndex + return Op; + } + + SDValue SubRegEven = DAG.getTargetConstant(VE::sub_even, dl, MVT::i32); + SDValue SubRegOdd = DAG.getTargetConstant(VE::sub_odd, dl, MVT::i32); + + SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i64, + StNode->getValue(), SubRegEven); + SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i64, + StNode->getValue(), SubRegOdd); + + unsigned alignment = StNode->getAlignment(); + if (alignment > 8) + alignment = 8; + + // VE stores Hi64 to 8(addr) and Lo64 to 0(addr) + SDValue OutChains[2]; + OutChains[0] = + DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), + StNode->getBasePtr(), MachinePointerInfo(), alignment, + StNode->isVolatile() ? MachineMemOperand::MOVolatile + : MachineMemOperand::MONone); + EVT addrVT = StNode->getBasePtr().getValueType(); + SDValue HiPtr = DAG.getNode(ISD::ADD, dl, addrVT, StNode->getBasePtr(), + DAG.getConstant(8, dl, addrVT)); + OutChains[1] = + DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0), HiPtr, + MachinePointerInfo(), alignment, + StNode->isVolatile() ? MachineMemOperand::MOVolatile + : MachineMemOperand::MONone); + return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); +} + +// Lower a vXi1 store into following instructions +// SVMi %1, %vm, 0 +// STSri %1, (,%addr) +// SVMi %2, %vm, 1 +// STSri %2, 8(,%addr) +// ... +static SDValue LowerI1Store(SDValue Op, SelectionDAG &DAG) { + SDLoc dl(Op); + StoreSDNode *StNode = dyn_cast(Op.getNode()); + assert(StNode && StNode->getOffset().isUndef() && "Unexpected node type"); + + SDValue BasePtr = StNode->getBasePtr(); + if (dyn_cast(BasePtr.getNode())) { + // For the case of frame index, expanding it here cause dependency + // problem. So, treat it as a legal and expand it in eliminateFrameIndex + return Op; + } + + unsigned alignment = StNode->getAlignment(); + if (alignment > 8) + alignment = 8; + EVT addrVT = BasePtr.getValueType(); + EVT MemVT = StNode->getMemoryVT(); + if (MemVT == MVT::v256i1) { + SDValue OutChains[4]; + for (int i = 0; i < 4; ++i) { + SDNode *V = + DAG.getMachineNode(VE::svm_smI, dl, MVT::i64, StNode->getValue(), + DAG.getTargetConstant(i, dl, MVT::i64)); + SDValue Addr = DAG.getNode(ISD::ADD, dl, addrVT, BasePtr, + DAG.getConstant(8 * i, dl, addrVT)); + OutChains[i] = + DAG.getStore(StNode->getChain(), dl, SDValue(V, 0), Addr, + MachinePointerInfo(), alignment, + StNode->isVolatile() ? MachineMemOperand::MOVolatile + : MachineMemOperand::MONone); + } + return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); + } else if (MemVT == MVT::v512i1) { + SDValue OutChains[8]; + for (int i = 0; i < 8; ++i) { + SDNode *V = + DAG.getMachineNode(VE::svm_sMI, dl, MVT::i64, StNode->getValue(), + DAG.getTargetConstant(i, dl, MVT::i64)); + SDValue Addr = DAG.getNode(ISD::ADD, dl, addrVT, BasePtr, + DAG.getConstant(8 * i, dl, addrVT)); + OutChains[i] = + DAG.getStore(StNode->getChain(), dl, SDValue(V, 0), Addr, + MachinePointerInfo(), alignment, + StNode->isVolatile() ? 
MachineMemOperand::MOVolatile + : MachineMemOperand::MONone); + } + return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); + } else { + // Otherwise, ask llvm to expand it. + return SDValue(); + } +} + +static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) { + SDLoc dl(Op); + StoreSDNode *St = cast(Op.getNode()); + + EVT MemVT = St->getMemoryVT(); + if (MemVT == MVT::f128) + return LowerF128Store(Op, DAG); + if (MemVT == MVT::v256i1 || MemVT == MVT::v512i1) + return LowerI1Store(Op, DAG); + + // Otherwise, ask llvm to expand it. + return SDValue(); +} + +// Custom lower UMULO/SMULO for VE. This code is similar to ExpandNode() +// in LegalizeDAG.cpp except the order of arguments to the library function. +static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG, + const VETargetLowering &TLI) { + unsigned opcode = Op.getOpcode(); + assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode."); + + bool isSigned = (opcode == ISD::SMULO); + EVT VT = MVT::i64; + EVT WideVT = MVT::i128; + SDLoc dl(Op); + SDValue LHS = Op.getOperand(0); + + if (LHS.getValueType() != VT) + return Op; + + SDValue ShiftAmt = DAG.getConstant(63, dl, VT); + + SDValue RHS = Op.getOperand(1); + SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt); + SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt); + SDValue Args[] = {LHS, HiLHS, RHS, HiRHS}; + + TargetLowering::MakeLibCallOptions CallOptions; + CallOptions.setSExt(isSigned); + SDValue MulResult = + TLI.makeLibCall(DAG, RTLIB::MUL_I128, WideVT, Args, CallOptions, dl) + .first; + SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, MulResult, + DAG.getIntPtrConstant(0, dl)); + SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, MulResult, + DAG.getIntPtrConstant(1, dl)); + if (isSigned) { + SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); + TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE); + } else { + TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT), + ISD::SETNE); + } + // MulResult is a node with an illegal type. Because such things are not + // generally permitted during this phase of legalization, ensure that + // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have + // been folded. + assert(MulResult->use_empty() && "Illegally typed node still in use!"); + + SDValue Ops[2] = {BottomHalf, TopHalf}; + return DAG.getMergeValues(Ops, dl); +} + +SDValue VETargetLowering::LowerATOMIC_FENCE(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + AtomicOrdering FenceOrdering = static_cast( + cast(Op.getOperand(1))->getZExtValue()); + SyncScope::ID FenceSSID = static_cast( + cast(Op.getOperand(2))->getZExtValue()); + + // VE uses Release consistency, so need a fence instruction if it is a + // cross-thread fence. + if (FenceSSID == SyncScope::System) { + switch (FenceOrdering) { + case AtomicOrdering::NotAtomic: + case AtomicOrdering::Unordered: + case AtomicOrdering::Monotonic: + // No need to generate fencem instruction here. + break; + case AtomicOrdering::Acquire: + // Generate "fencem 2" as acquire fence. + return SDValue( + DAG.getMachineNode(VE::FENCEload, DL, MVT::Other, Op.getOperand(0)), + 0); + case AtomicOrdering::Release: + // Generate "fencem 1" as release fence. + return SDValue( + DAG.getMachineNode(VE::FENCEstore, DL, MVT::Other, Op.getOperand(0)), + 0); + case AtomicOrdering::AcquireRelease: + case AtomicOrdering::SequentiallyConsistent: + // Generate "fencem 3" as acq_rel and seq_cst fence. 
+ // FIXME: "fencem 3" doesn't wait for for PCIe deveices accesses, + // so seq_cst may require more instruction for them. + return SDValue(DAG.getMachineNode(VE::FENCEloadstore, DL, MVT::Other, + Op.getOperand(0)), + 0); + } + } + + // MEMBARRIER is a compiler barrier; it codegens to a no-op. + return DAG.getNode(VEISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0)); +} + +static Instruction *callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { + Module *M = Builder.GetInsertBlock()->getParent()->getParent(); + Function *Func = Intrinsic::getDeclaration(M, Id); + return Builder.CreateCall(Func, {}); +} + +Instruction *VETargetLowering::emitLeadingFence(IRBuilder<> &Builder, + Instruction *Inst, + AtomicOrdering Ord) const { + switch (Ord) { + case AtomicOrdering::NotAtomic: + case AtomicOrdering::Unordered: + llvm_unreachable("Invalid fence: unordered/non-atomic"); + case AtomicOrdering::Monotonic: + case AtomicOrdering::Acquire: + return nullptr; // Nothing to do + case AtomicOrdering::Release: + case AtomicOrdering::AcquireRelease: + return callIntrinsic(Builder, Intrinsic::ve_fencem1); + case AtomicOrdering::SequentiallyConsistent: + if (!Inst->hasAtomicStore()) + return nullptr; // Nothing to do + return callIntrinsic(Builder, Intrinsic::ve_fencem3); + } + llvm_unreachable("Unknown fence ordering in emitLeadingFence"); +} + +Instruction *VETargetLowering::emitTrailingFence(IRBuilder<> &Builder, + Instruction *Inst, + AtomicOrdering Ord) const { + switch (Ord) { + case AtomicOrdering::NotAtomic: + case AtomicOrdering::Unordered: + llvm_unreachable("Invalid fence: unordered/not-atomic"); + case AtomicOrdering::Monotonic: + case AtomicOrdering::Release: + return nullptr; // Nothing to do + case AtomicOrdering::Acquire: + case AtomicOrdering::AcquireRelease: + return callIntrinsic(Builder, Intrinsic::ve_fencem2); + case AtomicOrdering::SequentiallyConsistent: + return callIntrinsic(Builder, Intrinsic::ve_fencem3); + } + llvm_unreachable("Unknown fence ordering in emitTrailingFence"); +} + +SDValue VETargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); + switch (IntNo) { + default: + return SDValue(); // Don't custom lower most intrinsics. + case Intrinsic::thread_pointer: { + report_fatal_error("Intrinsic::thread_point is not implemented yet"); + } + case Intrinsic::eh_sjlj_lsda: { + MachineFunction &MF = DAG.getMachineFunction(); + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); + MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); + const VETargetMachine *TM = + static_cast(&DAG.getTarget()); + + // Creat GCC_except_tableXX string. The real symbol for that will be + // generated in EHStreamer::emitExceptionTable() later. So, we just + // borrow it's name here. 
+ TM->getStrList()->push_back(std::string( + (Twine("GCC_except_table") + Twine(MF.getFunctionNumber())).str())); + SDValue Addr = + DAG.getTargetExternalSymbol(TM->getStrList()->back().c_str(), PtrVT, 0); + if (isPositionIndependent()) { + Addr = makeHiLoPair(Addr, VEMCExpr::VK_VE_GOTOFF_HI32, + VEMCExpr::VK_VE_GOTOFF_LO32, DAG); + SDValue GlobalBase = DAG.getNode(VEISD::GLOBAL_BASE_REG, dl, PtrVT); + return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalBase, Addr); + } else { + return makeHiLoPair(Addr, VEMCExpr::VK_VE_HI32, VEMCExpr::VK_VE_LO32, + DAG); + } + } + } +} + +SDValue VETargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + unsigned IntNo = cast(Op.getOperand(1))->getZExtValue(); + switch (IntNo) { + default: + return SDValue(); // Don't custom lower most intrinsics. + } +} + +SDValue VETargetLowering::LowerINTRINSIC_VOID(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + unsigned IntNo = cast(Op.getOperand(1))->getZExtValue(); + switch (IntNo) { + default: + return SDValue(); // Don't custom lower most intrinsics. + } +} + +// Should we expand the build vector with shuffles? +bool VETargetLowering::shouldExpandBuildVectorWithShuffles( + EVT VT, unsigned DefinedValues) const { +#if 1 + // FIXME: Change this to true or expression once we implement custom + // expansion of VECTOR_SHUFFLE completely. + + // Not use VECTOR_SHUFFLE to expand BUILD_VECTOR atm. Because, it causes + // infinity expand loop between both instructions since VECTOR_SHUFFLE + // is not implemented completely yet. + return false; +#else + return DefinedValues < 3; +#endif +} + +SDValue VETargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!"); + EVT VT = Op.getOperand(0).getValueType(); + + // Special treatements for packed V64 types. + if (VT == MVT::v512i32 || VT == MVT::v512f32) { + // Example of codes: + // %packed_v = extractelt %vr, %idx / 2 + // %packed_v &= 0xffffffff << ((%idx % 2) ? 0 : 32) + // %packed_v |= %val << (%idx % 2 * 32) + // %vr = insertelt %vr, %packed_v, %idx / 2 + + SDValue Vec = Op.getOperand(0); + SDValue Val = Op.getOperand(1); + SDValue Idx = Op.getOperand(2); + EVT i64 = EVT::getIntegerVT(*DAG.getContext(), 64); + EVT i32 = EVT::getIntegerVT(*DAG.getContext(), 32); + SDLoc dl(Op); + // In v512i32 and v512f32, both i32 and f32 values are placed from Low32, + // therefore convert f32 to i32 first. 
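The DAG nodes built on the following lines implement the pseudo-code above; written as ordinary scalar arithmetic, the packed update amounts to the following (a sketch of the idea only; the real code additionally routes an f32 payload through the i32 bitcast just mentioned):

```cpp
#include <cstdint>

// Two 32-bit elements share one 64-bit vector slot: even indices occupy the
// low 32 bits, odd indices the high 32 bits (matching the shift selection
// in the DAG code below).
uint64_t insertPacked32(uint64_t PackedVal, uint32_t NewElem, unsigned Idx) {
  unsigned Shift = (Idx & 1) ? 32u : 0u;
  uint64_t Keep = 0xFFFFFFFF00000000ULL >> Shift; // bits of the other element
  return (PackedVal & Keep) | (static_cast<uint64_t>(NewElem) << Shift);
}
```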
+ SDValue I32Val = Val; + if (VT == MVT::v512f32) { + I32Val = DAG.getBitcast(i32, Val); + } + SDValue Result = Op; + if (0 /* Idx->isConstant()*/) { + // FIXME: optimized implementation using constant values + } else { + SDValue SetEq = DAG.getCondCode(ISD::SETEQ); + // SDValue CcEq = DAG.getConstant(VECC::CC_IEQ, dl, i64); + SDValue ZeroConst = DAG.getConstant(0, dl, i64); + SDValue OneConst = DAG.getConstant(1, dl, i64); + SDValue ThirtyTwoConst = DAG.getConstant(32, dl, i64); + SDValue HighMask = DAG.getConstant(0xFFFFFFFF00000000, dl, i64); + SDValue HalfIdx = DAG.getNode(ISD::SRL, dl, i64, {Idx, OneConst}); + SDValue PackedVal = + SDValue(DAG.getMachineNode(VE::lvsl_svI, dl, i64, {Vec, HalfIdx}), 0); + SDValue IdxLSB = DAG.getNode(ISD::AND, dl, i64, {Idx, OneConst}); + SDValue ShiftIdx = + DAG.getNode(ISD::SELECT_CC, dl, i64, + {IdxLSB, ZeroConst, ZeroConst, ThirtyTwoConst, SetEq}); + SDValue Mask = DAG.getNode(ISD::SRL, dl, i64, {HighMask, ShiftIdx}); + SDValue MaskedVal = DAG.getNode(ISD::AND, dl, i64, {PackedVal, Mask}); + SDValue BaseVal = SDValue( + DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0); + // In v512i32 and v512f32, Both i32 and f32 values are placed from Low32. + SDValue SubLow32 = DAG.getTargetConstant(VE::sub_i32, dl, MVT::i32); + SDValue I64Val = + SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, MVT::i64, + BaseVal, I32Val, SubLow32), + 0); + SDValue ShiftedVal = DAG.getNode(ISD::SHL, dl, i64, {I64Val, ShiftIdx}); + SDValue CombinedVal = + DAG.getNode(ISD::OR, dl, i64, {ShiftedVal, MaskedVal}); + Result = + SDValue(DAG.getMachineNode(VE::lsv_vvIs, dl, Vec.getSimpleValueType(), + {Vec, HalfIdx, CombinedVal}), + 0); + } + return Result; + } + + // Insertion is legal for other V64 types. + return Op; +} + +SDValue VETargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, + SelectionDAG &DAG) const { + LLVM_DEBUG(dbgs() << "Lowering Shuffle\n"); + SDLoc dl(Op); + ShuffleVectorSDNode *ShuffleInstr = cast(Op.getNode()); + + SDValue firstVec = ShuffleInstr->getOperand(0); + int firstVecLength = firstVec.getSimpleValueType().getVectorNumElements(); + SDValue secondVec = ShuffleInstr->getOperand(1); + int secondVecLength = secondVec.getSimpleValueType().getVectorNumElements(); + + MVT ElementType = Op.getSimpleValueType().getScalarType(); + int resultSize = Op.getSimpleValueType().getVectorNumElements(); + + if (ShuffleInstr->isSplat()) { + int index = ShuffleInstr->getSplatIndex(); + if (index >= firstVecLength) { + index -= firstVecLength; + SDValue elem = DAG.getNode( + ISD::EXTRACT_VECTOR_ELT, dl, ElementType, + {secondVec, + DAG.getConstant(index, dl, + EVT::getIntegerVT(*DAG.getContext(), 64))}); + return DAG.getNode(VEISD::VEC_BROADCAST, dl, Op.getSimpleValueType(), + elem); + } else { + SDValue elem = DAG.getNode( + ISD::EXTRACT_VECTOR_ELT, dl, ElementType, + {firstVec, DAG.getConstant( + index, dl, EVT::getIntegerVT(*DAG.getContext(), 64))}); + return DAG.getNode(VEISD::VEC_BROADCAST, dl, Op.getSimpleValueType(), + elem); + } + } + + // Supports v256 shuffles only atm. 
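The length check on the next lines enforces the v256 restriction; after that, the loop classifies the mask. The accepted pattern is narrow: every element taken from the first source must use one fixed rotation, every element taken from the second source another fixed rotation, and the result may switch between the two sources at most once; the sources are then rotated with `vmv` and combined with `vmrg` under a block mask. A stand-alone model of that classification (function and variable names are mine, not part of the patch):

```cpp
#include <optional>
#include <utility>
#include <vector>

// Returns the (firstRot, secondRot) pair when Mask fits the
// "one rotation per source, at most one switch point" pattern,
// std::nullopt otherwise.  -1 entries mean "undef", as in LLVM.
std::optional<std::pair<int, int>>
matchRotateMerge(const std::vector<int> &Mask, int N) {
  int FirstRot = N, SecondRot = N, SwitchPoint = N;
  bool SecondComesFirst = !Mask.empty() && Mask[0] >= N;
  for (int i = 0, e = static_cast<int>(Mask.size()); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue; // undef element matches anything
    bool FromSecond = M >= N;
    if (FromSecond == SecondComesFirst && SwitchPoint != N)
      return std::nullopt; // switched back to the initial source: mixing
    if (FromSecond != SecondComesFirst && SwitchPoint == N)
      SwitchPoint = i; // first element taken from the trailing source
    int &Rot = FromSecond ? SecondRot : FirstRot;
    int Val = FromSecond ? M - N : M;
    if (Rot == N)
      Rot = i - Val;
    else if (Rot != i - Val)
      return std::nullopt; // this source is not a single rotation
  }
  return std::make_pair(FirstRot, SecondRot);
}
```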
+ if (firstVecLength != 256 || secondVecLength != 256 || resultSize != 256) { + LLVM_DEBUG(dbgs() << "Invalid vector lengths\n"); + return SDValue(); + } + + int firstrot = 256; + int secondrot = 256; + int firstsecond = 256; + bool inv_order; + + if (ShuffleInstr->getMaskElt(0) < 256) { + inv_order = false; + } else { + inv_order = true; + } + + for (int i = 0; i < 256; i++) { + int mask_value = ShuffleInstr->getMaskElt(i); + + if (mask_value < 0) // Undef + continue; + + if (mask_value < 256) { + if (firstsecond != 256 && !inv_order) { + LLVM_DEBUG(dbgs() << "Mixing\n"); + return SDValue(); + } + + if (firstsecond == 256 && inv_order) + firstsecond = i; + + if (firstrot == 256) + firstrot = i - mask_value; + else if (firstrot != i - mask_value) { + LLVM_DEBUG(dbgs() << "Bad first rot\n"); + return SDValue(); + } + } else { // mask_value >= 256 + if (firstsecond != 256 && inv_order) { + LLVM_DEBUG(dbgs() << "Mixing\n"); + return SDValue(); + } + + if (firstsecond == 256 && !inv_order) + firstsecond = i; + + mask_value -= 256; + + if (secondrot == 256) + secondrot = i - mask_value; + else if (secondrot != i - mask_value) { + LLVM_DEBUG(dbgs() << "Bad second rot\n"); + return SDValue(); + } + } + } + + if (firstrot < 0) + firstrot *= -1; + else + firstrot = 256 - firstrot; + if (secondrot < 0) + secondrot *= -1; + else + secondrot = 256 - secondrot; + + EVT i32 = EVT::getIntegerVT(*DAG.getContext(), 32); + EVT i64 = EVT::getIntegerVT(*DAG.getContext(), 64); + EVT v256i1 = EVT::getVectorVT(*DAG.getContext(), + EVT::getIntegerVT(*DAG.getContext(), 1), 256); + + SDValue VL = SDValue( + DAG.getMachineNode(VE::LEA32zzi, dl, MVT::i32, + DAG.getTargetConstant(resultSize, dl, MVT::i32)), + 0); + // SDValue VL = DAG.getTargetConstant(resultSize, dl, MVT::i32); + SDValue firstrotated = + firstrot % 256 != 0 + ? SDValue( + DAG.getMachineNode( + VE::vmv_vIvl, dl, firstVec.getSimpleValueType(), + {DAG.getConstant(firstrot % 256, dl, i32), firstVec, VL}), + 0) + : firstVec; + SDValue secondrotated = + secondrot % 256 != 0 + ? SDValue( + DAG.getMachineNode( + VE::vmv_vIvl, dl, secondVec.getSimpleValueType(), + {DAG.getConstant(secondrot % 256, dl, i32), secondVec, VL}), + 0) + : secondVec; + + int block = firstsecond / 64; + int secondblock = firstsecond % 64; + + SDValue Mask = DAG.getUNDEF(v256i1); + + for (int i = 0; i < block; i++) { + // set blocks to all 0s + SDValue mask = inv_order ? DAG.getConstant(0xffffffffffffffff, dl, i64) + : DAG.getConstant(0, dl, i64); + SDValue index = DAG.getTargetConstant(i, dl, i64); + Mask = SDValue( + DAG.getMachineNode(VE::lvm_mmIs, dl, v256i1, {Mask, index, mask}), 0); + } + + SDValue mask = DAG.getConstant(0xffffffffffffffff, dl, i64); + if (!inv_order) + mask = DAG.getNode(ISD::SRL, dl, i64, + {mask, DAG.getConstant(secondblock, dl, i64)}); + else + mask = DAG.getNode(ISD::SHL, dl, i64, + {mask, DAG.getConstant(64 - secondblock, dl, i64)}); + Mask = SDValue( + DAG.getMachineNode(VE::lvm_mmIs, dl, v256i1, + {Mask, DAG.getTargetConstant(block, dl, i64), mask}), + 0); + + for (int i = block + 1; i < 4; i++) { + // set blocks to all 1s + SDValue mask = inv_order ? 
DAG.getConstant(0, dl, i64) + : DAG.getConstant(0xffffffffffffffff, dl, i64); + SDValue index = DAG.getTargetConstant(i, dl, i64); + Mask = SDValue( + DAG.getMachineNode(VE::lvm_mmIs, dl, v256i1, {Mask, index, mask}), 0); + } + + SDValue returnValue = + SDValue(DAG.getMachineNode(VE::vmrg_vvvml, dl, Op.getSimpleValueType(), + {firstrotated, secondrotated, Mask, VL}), + 0); + return returnValue; +} + +SDValue VETargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!"); + EVT VT = Op.getOperand(0).getValueType(); + + // Special treatements for packed V64 types. + if (VT == MVT::v512i32 || VT == MVT::v512f32) { + // Example of codes: + // %packed_v = extractelt %vr, %idx / 2 + // %v = %packed_v >> (%idx % 2 * 32) + // %res = %v & 0xffffffff + + SDValue Vec = Op.getOperand(0); + SDValue Idx = Op.getOperand(1); + EVT i64 = EVT::getIntegerVT(*DAG.getContext(), 64); + EVT i32 = EVT::getIntegerVT(*DAG.getContext(), 32); + EVT f32 = EVT::getFloatingPointVT(32); + SDLoc dl(Op); + SDValue Result = Op; + if (0 /* Idx->isConstant() */) { + // FIXME: optimized implementation using constant values + } else { + SDValue SetEq = DAG.getCondCode(ISD::SETEQ); + SDValue ZeroConst = DAG.getConstant(0, dl, i64); + SDValue OneConst = DAG.getConstant(1, dl, i64); + SDValue ThirtyTwoConst = DAG.getConstant(32, dl, i64); + SDValue LowBits = DAG.getConstant(0xFFFFFFFF, dl, i64); + SDValue HalfIdx = DAG.getNode(ISD::SRL, dl, i64, {Idx, OneConst}); + SDValue PackedVal = + SDValue(DAG.getMachineNode(VE::lvsl_svI, dl, i64, {Vec, HalfIdx}), 0); + SDValue IdxLSB = DAG.getNode(ISD::AND, dl, i64, {Idx, OneConst}); + SDValue ShiftIdx = + DAG.getNode(ISD::SELECT_CC, dl, i64, + {IdxLSB, ZeroConst, ZeroConst, ThirtyTwoConst, SetEq}); + SDValue ShiftedVal = + DAG.getNode(ISD::SRL, dl, i64, {PackedVal, ShiftIdx}); + SDValue MaskedVal = DAG.getNode(ISD::AND, dl, i64, {ShiftedVal, LowBits}); + // In v512i32 and v512f32, Both i32 and f32 values are placed from Low32. + SDValue SubLow32 = DAG.getTargetConstant(VE::sub_i32, dl, MVT::i32); + Result = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, i32, + MaskedVal, SubLow32), + 0); + if (VT == MVT::v512f32) { + Result = DAG.getBitcast(f32, Result); + } + } + return Result; + } + + // Extraction is legal for other V64 types. 
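The plain return on the next line covers the non-packed types, where extraction is already legal. For the packed v512 path handled above, the computation reduces, in scalar terms, to the counterpart of the earlier insertion sketch:

```cpp
#include <cstdint>

// Scalar model of the packed extract: even indices read the low 32 bits of
// the shared 64-bit slot, odd indices the high 32 bits.
uint32_t extractPacked32(uint64_t PackedVal, unsigned Idx) {
  unsigned Shift = (Idx & 1) ? 32u : 0u;
  return static_cast<uint32_t>((PackedVal >> Shift) & 0xFFFFFFFFu);
}
```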
+ return Op; +} + +SDValue VETargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { + + switch (Op.getOpcode()) { + default: + llvm_unreachable("Should not custom lower this!"); + + case ISD::RETURNADDR: + return LowerRETURNADDR(Op, DAG, *this, Subtarget); + case ISD::FRAMEADDR: + return LowerFRAMEADDR(Op, DAG, *this, Subtarget); + case ISD::GlobalTLSAddress: + return LowerGlobalTLSAddress(Op, DAG); + case ISD::GlobalAddress: + return LowerGlobalAddress(Op, DAG); + case ISD::BlockAddress: + return LowerBlockAddress(Op, DAG); + case ISD::ConstantPool: + return LowerConstantPool(Op, DAG); + case ISD::EH_SJLJ_SETJMP: + return LowerEH_SJLJ_SETJMP(Op, DAG); + case ISD::EH_SJLJ_LONGJMP: + return LowerEH_SJLJ_LONGJMP(Op, DAG); + case ISD::EH_SJLJ_SETUP_DISPATCH: + return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); + case ISD::VASTART: + return LowerVASTART(Op, DAG, *this); + case ISD::VAARG: + return LowerVAARG(Op, DAG); + case ISD::DYNAMIC_STACKALLOC: + return LowerDYNAMIC_STACKALLOC(Op, DAG); + + case ISD::LOAD: + return LowerLOAD(Op, DAG); + case ISD::STORE: + return LowerSTORE(Op, DAG); + case ISD::UMULO: + case ISD::SMULO: + return LowerUMULO_SMULO(Op, DAG, *this); + case ISD::ATOMIC_FENCE: + return LowerATOMIC_FENCE(Op, DAG); + case ISD::INTRINSIC_VOID: + return LowerINTRINSIC_VOID(Op, DAG); + case ISD::INTRINSIC_W_CHAIN: + return LowerINTRINSIC_W_CHAIN(Op, DAG); + case ISD::INTRINSIC_WO_CHAIN: + return LowerINTRINSIC_WO_CHAIN(Op, DAG); + case ISD::BUILD_VECTOR: + return LowerBUILD_VECTOR(Op, DAG); + case ISD::INSERT_VECTOR_ELT: + return LowerINSERT_VECTOR_ELT(Op, DAG); + case ISD::EXTRACT_VECTOR_ELT: + return LowerEXTRACT_VECTOR_ELT(Op, DAG); + + case ISD::BITCAST: + return LowerBitcast(Op, DAG); + + case ISD::VECTOR_SHUFFLE: + return LowerVECTOR_SHUFFLE(Op, DAG); + + case ISD::MSCATTER: + case ISD::MGATHER: + return LowerMGATHER_MSCATTER(Op, DAG); + + case ISD::MLOAD: + return LowerMLOAD(Op, DAG); + } +} + +/// Return the entry encoding for a jump table in the +/// current function. The returned value is a member of the +/// MachineJumpTableInfo::JTEntryKind enum. +unsigned VETargetLowering::getJumpTableEncoding() const { + // VE doesn't support GOT32 style of labels in the current version of nas. + // So, we generates a following entry for each jump table. + // .4bytes .LBB0_2- + if (isPositionIndependent()) + return MachineJumpTableInfo::EK_Custom32; + + // Otherwise, use the normal jump table encoding heuristics. + return TargetLowering::getJumpTableEncoding(); +} + +const MCExpr *VETargetLowering::LowerCustomJumpTableEntry( + const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, + unsigned uid, MCContext &Ctx) const { + assert(isPositionIndependent()); + // VE doesn't support GOT32 style of labels in the current version of nas. + // So, we generates a following entry for each jump table. 
+  //   .4bytes .LBB0_2-<function name>
+  auto Value = MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
+  MCSymbol *Sym = Ctx.getOrCreateSymbol(MBB->getParent()->getName().data());
+  auto Base = MCSymbolRefExpr::create(Sym, Ctx);
+  return MCBinaryExpr::createSub(Value, Base, Ctx);
+}
+
+void VETargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
+                                              MachineBasicBlock *MBB,
+                                              MachineBasicBlock *DispatchBB,
+                                              int FI) const {
+  DebugLoc DL = MI.getDebugLoc();
+  MachineFunction *MF = MBB->getParent();
+  MachineRegisterInfo *MRI = &MF->getRegInfo();
+  const VEInstrInfo *TII = Subtarget->getInstrInfo();
+
+  const TargetRegisterClass *TRC = &VE::I64RegClass;
+  unsigned Tmp1 = MRI->createVirtualRegister(TRC);
+  unsigned Tmp2 = MRI->createVirtualRegister(TRC);
+  unsigned VR = MRI->createVirtualRegister(TRC);
+  unsigned Op = VE::STSri;
+
+  if (isPositionIndependent()) {
+    // Create following instructions for local linkage PIC code.
+    //   lea %Tmp1, DispatchBB@gotoff_lo
+    //   and %Tmp2, %Tmp1, (32)0
+    //   lea.sl %Tmp3, DispatchBB@gotoff_hi(%Tmp2)
+    //   adds.l %VR, %s15, %Tmp3                  ; %s15 is GOT
+    // FIXME: use lea.sl %BReg, .LJTI0_0@gotoff_hi(%Tmp2, %s15)
+    unsigned Tmp3 = MRI->createVirtualRegister(&VE::I64RegClass);
+    BuildMI(*MBB, MI, DL, TII->get(VE::LEAzzi), Tmp1)
+        .addMBB(DispatchBB, VEMCExpr::VK_VE_GOTOFF_LO32);
+    BuildMI(*MBB, MI, DL, TII->get(VE::ANDrm0), Tmp2).addReg(Tmp1).addImm(32);
+    BuildMI(*MBB, MI, DL, TII->get(VE::LEASLrzi), Tmp3)
+        .addReg(Tmp2)
+        .addMBB(DispatchBB, VEMCExpr::VK_VE_GOTOFF_HI32);
+    BuildMI(*MBB, MI, DL, TII->get(VE::ADXrr), VR)
+        .addReg(VE::SX15)
+        .addReg(Tmp3);
+  } else {
+    //   lea %Tmp1, DispatchBB@lo
+    //   and %Tmp2, %Tmp1, (32)0
+    //   lea.sl %VR, DispatchBB@hi(%Tmp2)
+    BuildMI(*MBB, MI, DL, TII->get(VE::LEAzzi), Tmp1)
+        .addMBB(DispatchBB, VEMCExpr::VK_VE_LO32);
+    BuildMI(*MBB, MI, DL, TII->get(VE::ANDrm0), Tmp2).addReg(Tmp1).addImm(32);
+    BuildMI(*MBB, MI, DL, TII->get(VE::LEASLrzi), VR)
+        .addReg(Tmp2)
+        .addMBB(DispatchBB, VEMCExpr::VK_VE_HI32);
+  }
+
+  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
+  addFrameReference(MIB, FI, 56 + 16);
+  MIB.addReg(VR);
+}
+
+MachineBasicBlock *
+VETargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
+                                        MachineBasicBlock *BB) const {
+  DebugLoc DL = MI.getDebugLoc();
+  MachineFunction *MF = BB->getParent();
+  MachineFrameInfo &MFI = MF->getFrameInfo();
+  MachineRegisterInfo *MRI = &MF->getRegInfo();
+  const VEInstrInfo *TII = Subtarget->getInstrInfo();
+  int FI = MFI.getFunctionContextIndex();
+
+  // Get a mapping of the call site numbers to all of the landing pads they're
+  // associated with.
+  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
+  unsigned MaxCSNum = 0;
+  for (auto &MBB : *MF) {
+    if (!MBB.isEHPad())
+      continue;
+
+    MCSymbol *Sym = nullptr;
+    for (const auto &MI : MBB) {
+      if (MI.isDebugInstr())
+        continue;
+
+      assert(MI.isEHLabel() && "expected EH_LABEL");
+      Sym = MI.getOperand(0).getMCSymbol();
+      break;
+    }
+
+    if (!MF->hasCallSiteLandingPad(Sym))
+      continue;
+
+    for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
+      CallSiteNumToLPad[CSI].push_back(&MBB);
+      MaxCSNum = std::max(MaxCSNum, CSI);
+    }
+  }
+
+  // Get an ordered list of the machine basic blocks for the jump table.
+  std::vector<MachineBasicBlock *> LPadList;
+  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
+  LPadList.reserve(CallSiteNumToLPad.size());
+
+  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
+    for (auto &LP : CallSiteNumToLPad[CSI]) {
+      LPadList.push_back(LP);
+      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
+    }
+  }
+
+  assert(!LPadList.empty() &&
+         "No landing pad destinations for the dispatch jump table!");
+
+  // Create the MBBs for the dispatch code.
+
+  // Shove the dispatch's address into the return slot in the function context.
+  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
+  DispatchBB->setIsEHPad(true);
+
+  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
+  BuildMI(TrapBB, DL, TII->get(VE::TRAP));
+  BuildMI(TrapBB, DL, TII->get(VE::NOP));
+  DispatchBB->addSuccessor(TrapBB);
+
+  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
+  DispatchBB->addSuccessor(DispContBB);
+
+  // Insert MBBs.
+  MF->push_back(DispatchBB);
+  MF->push_back(DispContBB);
+  MF->push_back(TrapBB);
+
+  // Insert code into the entry block that creates and registers the function
+  // context.
+  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
+
+  // Create the jump table and associated information
+  unsigned JTE = getJumpTableEncoding();
+  MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
+  unsigned MJTI = JTI->createJumpTableIndex(LPadList);
+
+  const VERegisterInfo &RI = TII->getRegisterInfo();
+  // Add a register mask with no preserved registers. This results in all
+  // registers being marked as clobbered.
+  BuildMI(DispatchBB, DL, TII->get(VE::NOP))
+      .addRegMask(RI.getNoPreservedMask());
+
+  if (isPositionIndependent()) {
+    // Force to generate GETGOT, since current implementation doesn't recover
+    // GOT register correctly.
+    BuildMI(DispatchBB, DL, TII->get(VE::GETGOT), VE::SX15);
+  }
+
+  // IReg is used as an index in a memory operand and therefore can't be SP
+  unsigned IReg = MRI->createVirtualRegister(&VE::I64RegClass);
+  addFrameReference(BuildMI(DispatchBB, DL, TII->get(VE::LDLUri), IReg), FI, 8);
+  if (LPadList.size() < 63) {
+    BuildMI(DispatchBB, DL, TII->get(VE::BCRLir))
+        .addImm(VECC::CC_ILE)
+        .addImm(LPadList.size())
+        .addReg(IReg)
+        .addMBB(TrapBB);
+  } else {
+    assert(LPadList.size() <= 0x7FFFFFFF && "Too large Landing Pad!");
+    unsigned TmpReg = MRI->createVirtualRegister(&VE::I64RegClass);
+    BuildMI(DispatchBB, DL, TII->get(VE::LEAzzi), TmpReg)
+        .addImm(LPadList.size());
+    BuildMI(DispatchBB, DL, TII->get(VE::BCRLrr))
+        .addImm(VECC::CC_ILE)
+        .addReg(TmpReg)
+        .addReg(IReg)
+        .addMBB(TrapBB);
+  }
+
+  unsigned BReg = MRI->createVirtualRegister(&VE::I64RegClass);
+
+  unsigned Tmp1 = MRI->createVirtualRegister(&VE::I64RegClass);
+  unsigned Tmp2 = MRI->createVirtualRegister(&VE::I64RegClass);
+
+  if (isPositionIndependent()) {
+    // Create following instructions for local linkage PIC code.
+ // lea %Tmp1, .LJTI0_0@gotoff_lo + // and %Tmp2, %Tmp1, (32)0 + // lea.sl %Tmp3, .LJTI0_0@gotoff_hi(%Tmp2) + // adds.l %BReg, %s15, %Tmp3 ; %s15 is GOT + // FIXME: use lea.sl %BReg, .LJTI0_0@gotoff_hi(%Tmp2, %s15) + unsigned Tmp3 = MRI->createVirtualRegister(&VE::I64RegClass); + BuildMI(DispContBB, DL, TII->get(VE::LEAzzi), Tmp1) + .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_GOTOFF_LO32); + BuildMI(DispContBB, DL, TII->get(VE::ANDrm0), Tmp2).addReg(Tmp1).addImm(32); + BuildMI(DispContBB, DL, TII->get(VE::LEASLrzi), Tmp3) + .addReg(Tmp2) + .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_GOTOFF_HI32); + BuildMI(DispContBB, DL, TII->get(VE::ADXrr), BReg) + .addReg(VE::SX15) + .addReg(Tmp3); + } else { + // lea %Tmp1, .LJTI0_0@lo + // and %Tmp2, %Tmp1, (32)0 + // lea.sl %BReg, .LJTI0_0@hi(%Tmp2) + BuildMI(DispContBB, DL, TII->get(VE::LEAzzi), Tmp1) + .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_LO32); + BuildMI(DispContBB, DL, TII->get(VE::ANDrm0), Tmp2).addReg(Tmp1).addImm(32); + BuildMI(DispContBB, DL, TII->get(VE::LEASLrzi), BReg) + .addReg(Tmp2) + .addJumpTableIndex(MJTI, VEMCExpr::VK_VE_HI32); + } + + switch (JTE) { + case MachineJumpTableInfo::EK_BlockAddress: { + // Generate simple block address code for no-PIC model. + + unsigned TReg = MRI->createVirtualRegister(&VE::I64RegClass); + unsigned Tmp1 = MRI->createVirtualRegister(&VE::I64RegClass); + unsigned Tmp2 = MRI->createVirtualRegister(&VE::I64RegClass); + + // sll Tmp1, IReg, 3 + BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1).addReg(IReg).addImm(3); + // FIXME: combine these add and lds into "lds TReg, *(BReg, Tmp1)" + // adds.l Tmp2, BReg, Tmp1 + BuildMI(DispContBB, DL, TII->get(VE::ADXrr), Tmp2) + .addReg(Tmp1) + .addReg(BReg); + // lds TReg, *(Tmp2) + BuildMI(DispContBB, DL, TII->get(VE::LDSri), TReg).addReg(Tmp2).addImm(0); + + // jmpq *(TReg) + BuildMI(DispContBB, DL, TII->get(VE::BAri)).addReg(TReg).addImm(0); + break; + } + case MachineJumpTableInfo::EK_Custom32: { + // for the case of PIC, generates these codes + + assert(isPositionIndependent()); + unsigned OReg = MRI->createVirtualRegister(&VE::I64RegClass); + unsigned TReg = MRI->createVirtualRegister(&VE::I64RegClass); + + unsigned Tmp1 = MRI->createVirtualRegister(&VE::I64RegClass); + unsigned Tmp2 = MRI->createVirtualRegister(&VE::I64RegClass); + + // sll Tmp1, IReg, 2 + BuildMI(DispContBB, DL, TII->get(VE::SLLri), Tmp1).addReg(IReg).addImm(2); + // FIXME: combine these add and ldl into "ldl.zx OReg, *(BReg, Tmp1)" + // add Tmp2, BReg, Tmp1 + BuildMI(DispContBB, DL, TII->get(VE::ADXrr), Tmp2) + .addReg(Tmp1) + .addReg(BReg); + // ldl.zx OReg, *(Tmp2) + BuildMI(DispContBB, DL, TII->get(VE::LDLUri), OReg).addReg(Tmp2).addImm(0); + + // Create following instructions for local linkage PIC code. 
+    //   lea %Tmp3, fun@gotoff_lo
+    //   and %Tmp4, %Tmp3, (32)0
+    //   lea.sl %Tmp5, fun@gotoff_hi(%Tmp4)
+    //   adds.l %BReg2, %s15, %Tmp5                  ; %s15 is GOT
+    // FIXME: use lea.sl %BReg2, fun@gotoff_hi(%Tmp4, %s15)
+    unsigned Tmp3 = MRI->createVirtualRegister(&VE::I64RegClass);
+    unsigned Tmp4 = MRI->createVirtualRegister(&VE::I64RegClass);
+    unsigned Tmp5 = MRI->createVirtualRegister(&VE::I64RegClass);
+    unsigned BReg2 = MRI->createVirtualRegister(&VE::I64RegClass);
+    const char *FunName = DispContBB->getParent()->getName().data();
+    BuildMI(DispContBB, DL, TII->get(VE::LEAzzi), Tmp3)
+        .addExternalSymbol(FunName, VEMCExpr::VK_VE_GOTOFF_LO32);
+    BuildMI(DispContBB, DL, TII->get(VE::ANDrm0), Tmp4).addReg(Tmp3).addImm(32);
+    BuildMI(DispContBB, DL, TII->get(VE::LEASLrzi), Tmp5)
+        .addReg(Tmp4)
+        .addExternalSymbol(FunName, VEMCExpr::VK_VE_GOTOFF_HI32);
+    BuildMI(DispContBB, DL, TII->get(VE::ADXrr), BReg2)
+        .addReg(VE::SX15)
+        .addReg(Tmp5);
+
+    // adds.l TReg, BReg2, OReg
+    BuildMI(DispContBB, DL, TII->get(VE::ADXrr), TReg)
+        .addReg(OReg)
+        .addReg(BReg2);
+    // jmpq *(TReg)
+    BuildMI(DispContBB, DL, TII->get(VE::BAri)).addReg(TReg).addImm(0);
+    break;
+  }
+  default:
+    llvm_unreachable("Unexpected jump table encoding");
+  }
+
+  // Add the jump table entries as successors to the MBB.
+  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
+  for (auto &LP : LPadList)
+    if (SeenMBBs.insert(LP).second)
+      DispContBB->addSuccessor(LP);
+
+  // N.B. the order the invoke BBs are processed in doesn't matter here.
+  SmallVector<MachineBasicBlock *, 64> MBBLPads;
+  const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
+  for (MachineBasicBlock *MBB : InvokeBBs) {
+    // Remove the landing pad successor from the invoke block and replace it
+    // with the new dispatch block.
+    // Keep a copy of Successors since it's modified inside the loop.
+    SmallVector<MachineBasicBlock *, 4> Successors(MBB->succ_rbegin(),
+                                                   MBB->succ_rend());
+    // FIXME: Avoid quadratic complexity.
+    for (auto MBBS : Successors) {
+      if (MBBS->isEHPad()) {
+        MBB->removeSuccessor(MBBS);
+        MBBLPads.push_back(MBBS);
+      }
+    }
+
+    MBB->addSuccessor(DispatchBB);
+
+    // Find the invoke call and mark all of the callee-saved registers as
+    // 'implicit defined' so that they're spilled. This prevents code from
+    // moving instructions to before the EH block, where they will never be
+    // executed.
+    for (auto &II : reverse(*MBB)) {
+      if (!II.isCall())
+        continue;
+
+      DenseMap<unsigned, bool> DefRegs;
+      for (auto &MOp : II.operands())
+        if (MOp.isReg())
+          DefRegs[MOp.getReg()] = true;
+
+      MachineInstrBuilder MIB(*MF, &II);
+      for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
+        unsigned Reg = SavedRegs[RI];
+        if (!DefRegs[Reg])
+          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
+      }
+
+      break;
+    }
+  }
+
+  // Mark all former landing pads as non-landing pads. The dispatch is the only
+  // landing pad now.
+  for (auto &LP : MBBLPads)
+    LP->setIsEHPad(false);
+
+  // The instruction is gone now.
+  MI.eraseFromParent();
+  return BB;
+}
+
+MachineBasicBlock *
+VETargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+                                              MachineBasicBlock *BB) const {
+  switch (MI.getOpcode()) {
+  default:
+    llvm_unreachable("Unknown Custom Instruction!");
+  case VE::EH_SjLj_Setup_Dispatch:
+    return EmitSjLjDispatchBlock(MI, BB);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// VE Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+VETargetLowering::ConstraintType
+VETargetLowering::getConstraintType(StringRef Constraint) const {
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+    default:
+      break;
+    case 'r':
+    case 'f':
+    case 'e':
+      return C_RegisterClass;
+    case 'I': // SIMM13
+      return C_Other;
+    }
+  }
+
+  return TargetLowering::getConstraintType(Constraint);
+}
+
+TargetLowering::ConstraintWeight
+VETargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &info,
+                                                 const char *constraint) const {
+  ConstraintWeight weight = CW_Invalid;
+  Value *CallOperandVal = info.CallOperandVal;
+  // If we don't have a value, we can't do a match,
+  // but allow it at the lowest weight.
+  if (!CallOperandVal)
+    return CW_Default;
+
+  // Look at the constraint type.
+  switch (*constraint) {
+  default:
+    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+    break;
+  case 'I': // SIMM13
+    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
+      if (isInt<13>(C->getSExtValue()))
+        weight = CW_Constant;
+    }
+    break;
+  }
+  return weight;
+}
+
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void VETargetLowering::LowerAsmOperandForConstraint(SDValue Op,
+                                                    std::string &Constraint,
+                                                    std::vector<SDValue> &Ops,
+                                                    SelectionDAG &DAG) const {
+  SDValue Result(nullptr, 0);
+
+  // Only support length 1 constraints for now.
+  if (Constraint.length() > 1)
+    return;
+
+  char ConstraintLetter = Constraint[0];
+  switch (ConstraintLetter) {
+  default:
+    break;
+  case 'I':
+    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+      if (isInt<13>(C->getSExtValue())) {
+        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
+                                       Op.getValueType());
+        break;
+      }
+      return;
+    }
+  }
+
+  if (Result.getNode()) {
+    Ops.push_back(Result);
+    return;
+  }
+  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+VETargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                                               StringRef Constraint,
+                                               MVT VT) const {
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+    case 'r':
+      return std::make_pair(0U, &VE::I64RegClass);
+    case 'f':
+      if (VT == MVT::f32 || VT == MVT::f64)
+        return std::make_pair(0U, &VE::I64RegClass);
+      else if (VT == MVT::f128)
+        return std::make_pair(0U, &VE::F128RegClass);
+      llvm_unreachable("Unknown ValueType for f-register-type!");
+      break;
+    case 'e':
+      if (VT == MVT::f32 || VT == MVT::f64)
+        return std::make_pair(0U, &VE::I64RegClass);
+      else if (VT == MVT::f128)
+        return std::make_pair(0U, &VE::F128RegClass);
+      llvm_unreachable("Unknown ValueType for e-register-type!");
+      break;
+    }
+  } else if (!Constraint.empty() && Constraint.size() <= 5 &&
+             Constraint[0] == '{' && *(Constraint.end() - 1) == '}') {
+    // constraint = '{r}'
+    // Remove the braces from around the name.
+ StringRef name(Constraint.data() + 1, Constraint.size() - 2); + // Handle register aliases: + // r0-r7 -> g0-g7 + // r8-r15 -> o0-o7 + // r16-r23 -> l0-l7 + // r24-r31 -> i0-i7 + uint64_t intVal = 0; + if (name.substr(0, 1).equals("r") && + !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) { + const char regTypes[] = {'g', 'o', 'l', 'i'}; + char regType = regTypes[intVal / 8]; + char regIdx = '0' + (intVal % 8); + char tmp[] = {'{', regType, regIdx, '}', 0}; + std::string newConstraint = std::string(tmp); + return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint, + VT); + } + } + + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); +} + +bool VETargetLowering::isOffsetFoldingLegal( + const GlobalAddressSDNode *GA) const { + // The VE target isn't yet aware of offsets. + return false; +} + +void VETargetLowering::ReplaceNodeResults(SDNode *N, + SmallVectorImpl &Results, + SelectionDAG &DAG) const { + + SDLoc dl(N); + + switch (N->getOpcode()) { + case ISD::BUILD_VECTOR: + case ISD::INSERT_VECTOR_ELT: + case ISD::EXTRACT_VECTOR_ELT: + case ISD::VECTOR_SHUFFLE: + case ISD::MSCATTER: + case ISD::MGATHER: + case ISD::MLOAD: + // ask llvm to expand vector related instructions if those are not legal. + return; + default: + LLVM_DEBUG(N->dumpr(&DAG)); + llvm_unreachable("Do not know how to custom type legalize this operation!"); + } +} + +// Override to enable LOAD_STACK_GUARD lowering on Linux. +bool VETargetLowering::useLoadStackGuardNode() const { + if (!Subtarget->isTargetLinux()) + return TargetLowering::useLoadStackGuardNode(); + return true; +} + +// Override to disable global variable loading on Linux. +void VETargetLowering::insertSSPDeclarations(Module &M) const { + if (!Subtarget->isTargetLinux()) + return TargetLowering::insertSSPDeclarations(M); +} + +void VETargetLowering::finalizeLowering(MachineFunction &MF) const { + TargetLoweringBase::finalizeLowering(MF); +} diff --git a/llvm/lib/Target/VE/VEInstrBuilder.h b/llvm/lib/Target/VE/VEInstrBuilder.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEInstrBuilder.h @@ -0,0 +1,39 @@ +//===-- VEInstrBuilder.h - Aides for building VE insts ----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file exposes functions that may be used with BuildMI from the +// MachineInstrBuilder.h file to simplify generating frame and constant pool +// references. +// +// For reference, the order of operands for memory references is: +// (Operand), Dest Reg, Base Reg, and either Reg Index or Immediate +// Displacement. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VEINSTRBUILDER_H +#define LLVM_LIB_TARGET_VE_VEINSTRBUILDER_H + +#include "llvm/CodeGen/MachineInstrBuilder.h" + +namespace llvm { + +/// addFrameReference - This function is used to add a reference to the base of +/// an abstract object on the stack frame of the current function. This +/// reference has base register as the FrameIndex offset until it is resolved. +/// This allows a constant offset to be specified as well... 
+///
+static inline const MachineInstrBuilder &
+addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0,
+                  bool mem = true) {
+  return MIB.addFrameIndex(FI).addImm(Offset);
+}
+
+} // namespace llvm
+
+#endif
diff --git a/llvm/lib/Target/VE/VEInstrFormats.td b/llvm/lib/Target/VE/VEInstrFormats.td
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/VE/VEInstrFormats.td
@@ -0,0 +1,103 @@
+//===-- VEInstrFormats.td - VE Instruction Formats ---------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+class InstVE<dag outs, dag ins, string asmstr, list<dag> pattern,
+             InstrItinClass itin = NoItinerary>
+    : Instruction {
+  field bits<64> Inst;
+
+  let Namespace = "VE";
+  let Size = 8;
+
+  bits<8> op;
+  let Inst{0-7} = op;
+
+  dag OutOperandList = outs;
+  dag InOperandList = ins;
+  let AsmString = asmstr;
+  let Pattern = pattern;
+
+  let DecoderNamespace = "VE";
+  field bits<64> SoftFail = 0;
+
+  let Itinerary = itin;
+}
+
+class RM<bits<8> opVal, dag outs, dag ins, string asmstr, list<dag> pattern,
+         InstrItinClass itin = NoItinerary>
+    : InstVE<outs, ins, asmstr, pattern, itin> {
+  bits<1> cx = 0;
+  bits<7> sx;
+  bits<1> cy = 0;
+  bits<7> sy;
+  bits<1> cz = 0;
+  bits<7> sz;
+  bits<32> imm32 = 0;
+  let op = opVal;
+  let Inst{15} = cx;
+  let Inst{14-8} = sx;
+  let Inst{23} = cy;
+  let Inst{22-16} = sy;
+  let Inst{31} = cz;
+  let Inst{30-24} = sz;
+  let Inst{63-32} = imm32;
+}
+
+class RR<bits<8> opVal, dag outs, dag ins, string asmstr, list<dag> pattern,
+         InstrItinClass itin = NoItinerary>
+    : RM<opVal, outs, ins, asmstr, pattern, itin> {
+  bits<1> cw = 0;
+  bits<1> cw2 = 0;
+  bits<4> cfw = 0;
+  let imm32{0-23} = 0;
+  let imm32{24} = cw;
+  let imm32{25} = cw2;
+  let imm32{26-27} = 0;
+  let imm32{28-31} = cfw;
+}
+
+class RRFENCE<bits<8> opVal, dag outs, dag ins, string asmstr,
+              list<dag> pattern, InstrItinClass itin = NoItinerary>
+    : InstVE<outs, ins, asmstr, pattern, itin> {
+  bits<1> avo = 0;
+  bits<1> lf = 0;
+  bits<1> sf = 0;
+  bits<1> c2 = 0;
+  bits<1> c1 = 0;
+  bits<1> c0 = 0;
+  let op = opVal;
+  let Inst{15} = avo;
+  let Inst{14-10} = 0;
+  let Inst{9} = lf;
+  let Inst{8} = sf;
+  let Inst{23-19} = 0;
+  let Inst{18} = c2;
+  let Inst{17} = c1;
+  let Inst{16} = c0;
+  let Inst{31-24} = 0;
+  let Inst{63-32} = 0;
+}
+
+class CF<bits<8> opVal, dag outs, dag ins, string asmstr, list<dag> pattern,
+         InstrItinClass itin = NoItinerary>
+    : RM<opVal, outs, ins, asmstr, pattern, itin> {
+  bits<1> cx2;
+  bits<2> bpf;
+  bits<4> cf;
+  let cx = 0;
+  let sx{6} = cx2;
+  let sx{5-4} = bpf;
+  let sx{3-0} = cf;
+}
+
+// Pseudo instructions.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+    : InstVE<outs, ins, asmstr, pattern> {
+  let isCodeGenOnly = 1;
+  let isPseudo = 1;
+}
diff --git a/llvm/lib/Target/VE/VEInstrInfo.h b/llvm/lib/Target/VE/VEInstrInfo.h
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/VE/VEInstrInfo.h
@@ -0,0 +1,103 @@
+//===-- VEInstrInfo.h - VE Instruction Information --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the VE implementation of the TargetInstrInfo class.
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VEINSTRINFO_H +#define LLVM_LIB_TARGET_VE_VEINSTRINFO_H + +#include "VERegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" + +#define GET_INSTRINFO_HEADER +#include "VEGenInstrInfo.inc" + +namespace llvm { + +class VESubtarget; + +class VEInstrInfo : public VEGenInstrInfo { + const VERegisterInfo RI; + const VESubtarget &Subtarget; + virtual void anchor(); + +public: + explicit VEInstrInfo(VESubtarget &ST); + + /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As + /// such, whenever a client has an instance of instruction info, it should + /// always be able to get register info as well (through this method). + /// + const VERegisterInfo &getRegisterInfo() const { return RI; } + + /// isLoadFromStackSlot - If the specified machine instruction is a direct + /// load from a stack slot, return the virtual or physical register number of + /// the destination along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than loading from the stack slot. + unsigned isLoadFromStackSlot(const MachineInstr &MI, + int &FrameIndex) const override; + + /// isStoreToStackSlot - If the specified machine instruction is a direct + /// store to a stack slot, return the virtual or physical register number of + /// the source reg along with the FrameIndex of the loaded stack slot. If + /// not, return 0. This predicate must return 0 if the instruction has + /// any side effects other than storing to the stack slot. + unsigned isStoreToStackSlot(const MachineInstr &MI, + int &FrameIndex) const override; + + bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify = false) const override; + + unsigned removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved = nullptr) const override; + + unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, + MachineBasicBlock *FBB, ArrayRef Cond, + const DebugLoc &DL, + int *BytesAdded = nullptr) const override; + + bool + reverseBranchCondition(SmallVectorImpl &Cond) const override; + + void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, + bool KillSrc) const override; + void copyPhysSubRegs(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, + const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, + bool KillSrc, const MCInstrDesc &MCID, + unsigned int numSubRegs, + const unsigned *subRegIdx) const; + + void storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, unsigned SrcReg, + bool isKill, int FrameIndex, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const override; + + void loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, unsigned DestReg, + int FrameIndex, const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const override; + + unsigned getGlobalBaseReg(MachineFunction *MF) const; + + // Lower pseudo instructions after register allocation. 
+ bool expandPostRAPseudo(MachineInstr &MI) const override; + + bool expandExtendStackPseudo(MachineInstr &MI) const; + bool expandGetStackTopPseudo(MachineInstr &MI) const; +}; + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEInstrInfo.cpp @@ -0,0 +1,1007 @@ +//===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the VE implementation of the TargetInstrInfo class. +// +//===----------------------------------------------------------------------===// + +#include "VEInstrInfo.h" +#include "VE.h" +#include "VEMachineFunctionInfo.h" +#include "VESubtarget.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" + +#define DEBUG_TYPE "ve" + +using namespace llvm; + +static cl::opt + ShowSpillMessageVec("show-spill-message-vec", cl::init(false), + cl::desc("Enable diagnostic message for spill/restore " + "of vector or vector mask registers."), + cl::Hidden); + +using namespace llvm; + +#define GET_INSTRINFO_CTOR_DTOR +#include "VEGenInstrInfo.inc" + +// Pin the vtable to this file. +void VEInstrInfo::anchor() {} + +VEInstrInfo::VEInstrInfo(VESubtarget &ST) + : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI(), + Subtarget(ST) {} + +/// isLoadFromStackSlot - If the specified machine instruction is a direct +/// load from a stack slot, return the virtual or physical register number of +/// the destination along with the FrameIndex of the loaded stack slot. If +/// not, return 0. This predicate must return 0 if the instruction has +/// any side effects other than loading from the stack slot. +unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, + int &FrameIndex) const { + if (MI.getOpcode() == VE::LDSri || // I64 + MI.getOpcode() == VE::LDLri || // I32 + MI.getOpcode() == VE::LDUri || // F32 + MI.getOpcode() == VE::LDQri // F128 (pseudo) + ) { + if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() && + MI.getOperand(2).getImm() == 0) { + FrameIndex = MI.getOperand(1).getIndex(); + return MI.getOperand(0).getReg(); + } + } + return 0; +} + +/// isStoreToStackSlot - If the specified machine instruction is a direct +/// store to a stack slot, return the virtual or physical register number of +/// the source reg along with the FrameIndex of the loaded stack slot. If +/// not, return 0. This predicate must return 0 if the instruction has +/// any side effects other than storing to the stack slot. 
+unsigned VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI, + int &FrameIndex) const { + if (MI.getOpcode() == VE::STSri || // I64 + MI.getOpcode() == VE::STLri || // I32 + MI.getOpcode() == VE::STUri || // F32 + MI.getOpcode() == VE::STQri || // F128 (pseudo) + MI.getOpcode() == VE::STVRri || // V64 (pseudo) + MI.getOpcode() == VE::STVMri || // VM (pseudo) + MI.getOpcode() == VE::STVM512ri // VM512 (pseudo) + ) { + if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() && + MI.getOperand(1).getImm() == 0) { + FrameIndex = MI.getOperand(0).getIndex(); + return MI.getOperand(2).getReg(); + } + } + return 0; +} + +static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); } + +static VECC::CondCodes GetOppositeBranchCondition(VECC::CondCodes CC) { + switch (CC) { + case VECC::CC_IG: + return VECC::CC_ILE; + case VECC::CC_IL: + return VECC::CC_IGE; + case VECC::CC_INE: + return VECC::CC_IEQ; + case VECC::CC_IEQ: + return VECC::CC_INE; + case VECC::CC_IGE: + return VECC::CC_IL; + case VECC::CC_ILE: + return VECC::CC_IG; + case VECC::CC_AF: + return VECC::CC_AT; + case VECC::CC_G: + return VECC::CC_LENAN; + case VECC::CC_L: + return VECC::CC_GENAN; + case VECC::CC_NE: + return VECC::CC_EQNAN; + case VECC::CC_EQ: + return VECC::CC_NENAN; + case VECC::CC_GE: + return VECC::CC_LNAN; + case VECC::CC_LE: + return VECC::CC_GNAN; + case VECC::CC_NUM: + return VECC::CC_NAN; + case VECC::CC_NAN: + return VECC::CC_NUM; + case VECC::CC_GNAN: + return VECC::CC_LE; + case VECC::CC_LNAN: + return VECC::CC_GE; + case VECC::CC_NENAN: + return VECC::CC_EQ; + case VECC::CC_EQNAN: + return VECC::CC_NE; + case VECC::CC_GENAN: + return VECC::CC_L; + case VECC::CC_LENAN: + return VECC::CC_G; + case VECC::CC_AT: + return VECC::CC_AF; + } + llvm_unreachable("Invalid cond code"); +} + +// Treat br.l [BCR AT] as unconditional branch +static bool isUncondBranchOpcode(int Opc) { + return Opc == VE::BCRLa || Opc == VE::BCRWa || Opc == VE::BCRDa || + Opc == VE::BCRSa; +} + +static bool isCondBranchOpcode(int Opc) { + return Opc == VE::BCRLrr || Opc == VE::BCRLir || Opc == VE::BCRLrm0 || + Opc == VE::BCRLrm1 || Opc == VE::BCRLim0 || Opc == VE::BCRLim1 || + Opc == VE::BCRWrr || Opc == VE::BCRWir || Opc == VE::BCRWrm0 || + Opc == VE::BCRWrm1 || Opc == VE::BCRWim0 || Opc == VE::BCRWim1 || + Opc == VE::BCRDrr || Opc == VE::BCRDir || Opc == VE::BCRDrm0 || + Opc == VE::BCRDrm1 || Opc == VE::BCRDim0 || Opc == VE::BCRDim1 || + Opc == VE::BCRSrr || Opc == VE::BCRSir || Opc == VE::BCRSrm0 || + Opc == VE::BCRSrm1 || Opc == VE::BCRSim0 || Opc == VE::BCRSim1; +} + +static bool isIndirectBranchOpcode(int Opc) { + report_fatal_error("isIndirectBranchOpcode is not implemented yet"); +} + +static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, + SmallVectorImpl &Cond) { + Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm())); + Cond.push_back(LastInst->getOperand(1)); + Cond.push_back(LastInst->getOperand(2)); + Target = LastInst->getOperand(3).getMBB(); +} + +bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify) const { + MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); + if (I == MBB.end()) + return false; + + if (!isUnpredicatedTerminator(*I)) + return false; + + // Get the last instruction in the block. + MachineInstr *LastInst = &*I; + unsigned LastOpc = LastInst->getOpcode(); + + // If there is only one terminator instruction, process it. 
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) { + if (isUncondBranchOpcode(LastOpc)) { + TBB = LastInst->getOperand(0).getMBB(); + return false; + } + if (isCondBranchOpcode(LastOpc)) { + // Block ends with fall-through condbranch. + parseCondBranch(LastInst, TBB, Cond); + return false; + } + return true; // Can't handle indirect branch. + } + + // Get the instruction before it if it is a terminator. + MachineInstr *SecondLastInst = &*I; + unsigned SecondLastOpc = SecondLastInst->getOpcode(); + + // If AllowModify is true and the block ends with two or more unconditional + // branches, delete all but the first unconditional branch. + if (AllowModify && isUncondBranchOpcode(LastOpc)) { + while (isUncondBranchOpcode(SecondLastOpc)) { + LastInst->eraseFromParent(); + LastInst = SecondLastInst; + LastOpc = LastInst->getOpcode(); + if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) { + // Return now the only terminator is an unconditional branch. + TBB = LastInst->getOperand(0).getMBB(); + return false; + } else { + SecondLastInst = &*I; + SecondLastOpc = SecondLastInst->getOpcode(); + } + } + } + + // If there are three terminators, we don't know what sort of block this is. + if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I)) + return true; + + // If the block ends with a B and a Bcc, handle it. + if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { + parseCondBranch(SecondLastInst, TBB, Cond); + FBB = LastInst->getOperand(0).getMBB(); + return false; + } + + // If the block ends with two unconditional branches, handle it. The second + // one is not executed. + if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { + TBB = SecondLastInst->getOperand(0).getMBB(); + return false; + } + + // ...likewise if it ends with an indirect branch followed by an unconditional + // branch. + if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { + I = LastInst; + if (AllowModify) + I->eraseFromParent(); + return true; + } + + // Otherwise, can't handle this. 
+ return true; +} + +unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB, + MachineBasicBlock *TBB, + MachineBasicBlock *FBB, + ArrayRef Cond, + const DebugLoc &DL, int *BytesAdded) const { + assert(TBB && "insertBranch must not be told to insert a fallthrough"); + assert((Cond.size() == 3 || Cond.size() == 0) && + "VE branch conditions should have three component!"); + assert(!BytesAdded && "code size not handled"); + if (Cond.empty()) { + // Uncondition branch + assert(!FBB && "Unconditional branch with multiple successors!"); + BuildMI(&MBB, DL, get(VE::BCRLa)).addMBB(TBB); + return 1; + } + + // Conditional branch + // (BCRir CC sy sz addr) + + assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented"); + + unsigned opc[2]; + const TargetRegisterInfo *TRI = &getRegisterInfo(); + MachineFunction *MF = MBB.getParent(); + const MachineRegisterInfo &MRI = MF->getRegInfo(); + unsigned Reg = Cond[2].getReg(); + if (IsIntegerCC(Cond[0].getImm())) { + if (TRI->getRegSizeInBits(Reg, MRI) == 32) { + opc[0] = VE::BCRWir; + opc[1] = VE::BCRWrr; + } else { + opc[0] = VE::BCRLir; + opc[1] = VE::BCRLrr; + } + } else { + if (TRI->getRegSizeInBits(Reg, MRI) == 32) { + opc[0] = VE::BCRSir; + opc[1] = VE::BCRSrr; + } else { + opc[0] = VE::BCRDir; + opc[1] = VE::BCRDrr; + } + } + if (Cond[1].isImm()) { + BuildMI(&MBB, DL, get(opc[0])) + .add(Cond[0]) // condition code + .add(Cond[1]) // lhs + .add(Cond[2]) // rhs + .addMBB(TBB); + } else { + BuildMI(&MBB, DL, get(opc[1])) + .add(Cond[0]) + .add(Cond[1]) + .add(Cond[2]) + .addMBB(TBB); + } + + if (!FBB) + return 1; + BuildMI(&MBB, DL, get(VE::BCRLa)).addMBB(FBB); + return 2; +} + +unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved) const { + assert(!BytesRemoved && "code size not handled"); + + MachineBasicBlock::iterator I = MBB.end(); + unsigned Count = 0; + while (I != MBB.begin()) { + --I; + + if (I->isDebugValue()) + continue; + + if (!isUncondBranchOpcode(I->getOpcode()) && + !isCondBranchOpcode(I->getOpcode())) + break; // Not a branch + + I->eraseFromParent(); + I = MBB.end(); + ++Count; + } + return Count; + + // report_fatal_error("removeBranch is not implemented yet"); +} + +bool VEInstrInfo::reverseBranchCondition( + SmallVectorImpl &Cond) const { + VECC::CondCodes CC = static_cast(Cond[0].getImm()); + Cond[0].setImm(GetOppositeBranchCondition(CC)); + return false; +} + +void VEInstrInfo::copyPhysSubRegs( + MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, + unsigned DestReg, unsigned SrcReg, bool KillSrc, const MCInstrDesc &MCID, + unsigned int numSubRegs, const unsigned *subRegIdx) const { + const TargetRegisterInfo *TRI = &getRegisterInfo(); + MachineInstr *MovMI = nullptr; + + for (unsigned i = 0; i != numSubRegs; ++i) { + unsigned SubDest = TRI->getSubReg(DestReg, subRegIdx[i]); + unsigned SubSrc = TRI->getSubReg(SrcReg, subRegIdx[i]); + assert(SubDest && SubSrc && "Bad sub-register"); + + if (MCID.getOpcode() == VE::ORri) { + // generate "ORri, dest, src, 0" instruction. + MachineInstrBuilder MIB = + BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0); + MovMI = MIB.getInstr(); + } else if (MCID.getOpcode() == VE::andm_mmm) { + // generate "ANDM, dest, vm0, src" instruction. + MachineInstrBuilder MIB = + BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc); + MovMI = MIB.getInstr(); + } else { + llvm_unreachable("Unexpected reg-to-reg copy instruction"); + } + } + // Add implicit super-register defs and kills to the last MovMI. 
+ MovMI->addRegisterDefined(DestReg, TRI); + if (KillSrc) + MovMI->addRegisterKilled(SrcReg, TRI); +} + +void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, const DebugLoc &DL, + MCRegister DestReg, MCRegister SrcReg, + bool KillSrc) const { + + // For the case of VE, I32, I64, and F32 uses the identical + // registers %s0-%s63, so no need to check other register classes + // here + if (VE::I32RegClass.contains(DestReg, SrcReg)) + BuildMI(MBB, I, DL, get(VE::ORri), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .addImm(0); + // any scaler to any scaler + else if ((VE::I32RegClass.contains(SrcReg) || + VE::F32RegClass.contains(SrcReg) || + VE::I64RegClass.contains(SrcReg)) && + (VE::I32RegClass.contains(DestReg) || + VE::F32RegClass.contains(DestReg) || + VE::I64RegClass.contains(DestReg))) + BuildMI(MBB, I, DL, get(VE::ORri), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .addImm(0); + else if (VE::V64RegClass.contains(DestReg, SrcReg)) { + // Generate following instructions + // LEA32zzi %vl, 256 + // vor_v1vl %dest, (0)1, %src, %vl + // TODO: reuse a register if vl is already assigned to a register + unsigned TmpReg = VE::SX12; + BuildMI(MBB, I, DL, get(VE::LEA32zzi), TmpReg).addImm(256); + BuildMI(MBB, I, DL, get(VE::vor_v1vl), DestReg) + .addImm(0) + .addReg(SrcReg, getKillRegState(KillSrc)) + .addReg(TmpReg, getKillRegState(true)); + } else if (VE::VMRegClass.contains(DestReg, SrcReg)) + BuildMI(MBB, I, DL, get(VE::andm_mmm), DestReg) + .addReg(VE::VM0) + .addReg(SrcReg, getKillRegState(KillSrc)); + else if (VE::VM512RegClass.contains(DestReg, SrcReg)) { + // Use two instructions. + const unsigned subRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd}; + unsigned int numSubRegs = 2; + copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::andm_mmm), + numSubRegs, subRegIdx); + } else if (VE::F128RegClass.contains(DestReg, SrcReg)) { + // Use two instructions. + const unsigned subRegIdx[] = {VE::sub_even, VE::sub_odd}; + unsigned int numSubRegs = 2; + copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri), + numSubRegs, subRegIdx); + } else { + const TargetRegisterInfo *TRI = &getRegisterInfo(); + dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI) + << " to " << printReg(DestReg, TRI) << "\n"; + llvm_unreachable("Impossible reg-to-reg copy"); + } +} + +void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned SrcReg, bool isKill, int FI, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + DebugLoc DL; + if (I != MBB.end()) + DL = I->getDebugLoc(); + + if (ShowSpillMessageVec) { + if (RC == &VE::V64RegClass) { + dbgs() << "spill " << printReg(SrcReg, TRI) << " - V64\n"; + } else if (RC == &VE::VMRegClass) { + dbgs() << "spill " << printReg(SrcReg, TRI) << " - VM\n"; + } else if (VE::VM512RegClass.hasSubClassEq(RC)) { + dbgs() << "spill " << printReg(SrcReg, TRI) << " - VM512\n"; + } + } + + MachineFunction *MF = MBB.getParent(); + const MachineFrameInfo &MFI = MF->getFrameInfo(); + MachineMemOperand *MMO = MF->getMachineMemOperand( + MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, + MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + + // On the order of operands here: think "[FrameIdx + 0] = SrcReg". 
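+  // (For example, spilling an I64 register to frame index FI builds roughly
+  //  "STSri FI, 0, SrcReg": the frame-address operands come first and the
+  //  stored value last, mirroring "[FrameIdx + 0] = SrcReg" above.)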
+ if (RC == &VE::I64RegClass) + BuildMI(MBB, I, DL, get(VE::STSri)) + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); + else if (RC == &VE::I32RegClass) + BuildMI(MBB, I, DL, get(VE::STLri)) + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); + else if (RC == &VE::F32RegClass) + BuildMI(MBB, I, DL, get(VE::STUri)) + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); + else if (VE::F128RegClass.hasSubClassEq(RC)) + BuildMI(MBB, I, DL, get(VE::STQri)) + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); + else if (RC == &VE::V64RegClass) + BuildMI(MBB, I, DL, get(VE::STVRri)) + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addImm(256) + .addMemOperand(MMO); + else if (RC == &VE::VMRegClass) + BuildMI(MBB, I, DL, get(VE::STVMri)) + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); + else if (VE::VM512RegClass.hasSubClassEq(RC)) + BuildMI(MBB, I, DL, get(VE::STVM512ri)) + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); + else + report_fatal_error("Can't store this register to stack slot"); +} + +void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + unsigned DestReg, int FI, + const TargetRegisterClass *RC, + const TargetRegisterInfo *TRI) const { + DebugLoc DL; + if (I != MBB.end()) + DL = I->getDebugLoc(); + + if (ShowSpillMessageVec) { + if (RC == &VE::V64RegClass) { + dbgs() << "restore " << printReg(DestReg, TRI) << " - V64\n"; + } else if (RC == &VE::VMRegClass) { + dbgs() << "restore " << printReg(DestReg, TRI) << " - VM\n"; + } else if (VE::VM512RegClass.hasSubClassEq(RC)) { + dbgs() << "restore " << printReg(DestReg, TRI) << " - VM512\n"; + } + } + + MachineFunction *MF = MBB.getParent(); + const MachineFrameInfo &MFI = MF->getFrameInfo(); + MachineMemOperand *MMO = MF->getMachineMemOperand( + MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, + MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); + + if (RC == &VE::I64RegClass) + BuildMI(MBB, I, DL, get(VE::LDSri), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + else if (RC == &VE::I32RegClass) + BuildMI(MBB, I, DL, get(VE::LDLri), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + else if (RC == &VE::F32RegClass) + BuildMI(MBB, I, DL, get(VE::LDUri), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + else if (VE::F128RegClass.hasSubClassEq(RC)) + BuildMI(MBB, I, DL, get(VE::LDQri), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + else if (RC == &VE::V64RegClass) + BuildMI(MBB, I, DL, get(VE::LDVRri), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addImm(256) + .addMemOperand(MMO); + else if (RC == &VE::VMRegClass) + BuildMI(MBB, I, DL, get(VE::LDVMri), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + else if (VE::VM512RegClass.hasSubClassEq(RC)) + BuildMI(MBB, I, DL, get(VE::LDVM512ri), DestReg) + .addFrameIndex(FI) + .addImm(0) + .addMemOperand(MMO); + else + report_fatal_error("Can't load this register from stack slot"); +} + +unsigned VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const { + VEMachineFunctionInfo *VEFI = MF->getInfo(); + unsigned GlobalBaseReg = VEFI->getGlobalBaseReg(); + if (GlobalBaseReg != 0) + return GlobalBaseReg; + + // We use %s15 
(%got) as a global base register + GlobalBaseReg = VE::SX15; + + // Insert a pseudo instruction to set the GlobalBaseReg into the first + // MBB of the function + MachineBasicBlock &FirstMBB = MF->front(); + MachineBasicBlock::iterator MBBI = FirstMBB.begin(); + DebugLoc dl; + BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg); + VEFI->setGlobalBaseReg(GlobalBaseReg); + return GlobalBaseReg; +} + +static int GetVM512Upper(int no) { return (no - VE::VMP0) * 2 + VE::VM0; } + +static int GetVM512Lower(int no) { return GetVM512Upper(no) + 1; } + +static void buildVMRInst(MachineInstr &MI, const MCInstrDesc &MCID) { + MachineBasicBlock *MBB = MI.getParent(); + DebugLoc dl = MI.getDebugLoc(); + + unsigned VMXu = GetVM512Upper(MI.getOperand(0).getReg()); + unsigned VMXl = GetVM512Lower(MI.getOperand(0).getReg()); + unsigned VMYu = GetVM512Upper(MI.getOperand(1).getReg()); + unsigned VMYl = GetVM512Lower(MI.getOperand(1).getReg()); + + switch (MI.getOpcode()) { + default: { + unsigned VMZu = GetVM512Upper(MI.getOperand(2).getReg()); + unsigned VMZl = GetVM512Lower(MI.getOperand(2).getReg()); + BuildMI(*MBB, MI, dl, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu); + BuildMI(*MBB, MI, dl, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl); + break; + } + case VE::negm_MM: + BuildMI(*MBB, MI, dl, MCID).addDef(VMXu).addUse(VMYu); + BuildMI(*MBB, MI, dl, MCID).addDef(VMXl).addUse(VMYl); + break; + } + MI.eraseFromParent(); +} + +static void expandPseudoVFMK_VL(const TargetInstrInfo &TI, MachineInstr &MI) { + // replace to pvfmk.w.up and pvfmk.w.lo + // replace to pvfmk.s.up and pvfmk.s.lo + + std::map> map = { + {VE::pvfmkat_Ml, {VE::pvfmkwupat_ml, VE::pvfmkwloat_ml}}, + {VE::pvfmkaf_Ml, {VE::pvfmkwupaf_ml, VE::pvfmkwloaf_ml}}, + {VE::pvfmkwgt_Mvl, {VE::pvfmkwupgt_mvl, VE::pvfmkwlogt_mvl}}, + {VE::pvfmkwlt_Mvl, {VE::pvfmkwuplt_mvl, VE::pvfmkwlolt_mvl}}, + {VE::pvfmkwne_Mvl, {VE::pvfmkwupne_mvl, VE::pvfmkwlone_mvl}}, + {VE::pvfmkweq_Mvl, {VE::pvfmkwupeq_mvl, VE::pvfmkwloeq_mvl}}, + {VE::pvfmkwge_Mvl, {VE::pvfmkwupge_mvl, VE::pvfmkwloge_mvl}}, + {VE::pvfmkwle_Mvl, {VE::pvfmkwuple_mvl, VE::pvfmkwlole_mvl}}, + {VE::pvfmkwnum_Mvl, {VE::pvfmkwupnum_mvl, VE::pvfmkwlonum_mvl}}, + {VE::pvfmkwnan_Mvl, {VE::pvfmkwupnan_mvl, VE::pvfmkwlonan_mvl}}, + {VE::pvfmkwgtnan_Mvl, {VE::pvfmkwupgtnan_mvl, VE::pvfmkwlogtnan_mvl}}, + {VE::pvfmkwltnan_Mvl, {VE::pvfmkwupltnan_mvl, VE::pvfmkwloltnan_mvl}}, + {VE::pvfmkwnenan_Mvl, {VE::pvfmkwupnenan_mvl, VE::pvfmkwlonenan_mvl}}, + {VE::pvfmkweqnan_Mvl, {VE::pvfmkwupeqnan_mvl, VE::pvfmkwloeqnan_mvl}}, + {VE::pvfmkwgenan_Mvl, {VE::pvfmkwupgenan_mvl, VE::pvfmkwlogenan_mvl}}, + {VE::pvfmkwlenan_Mvl, {VE::pvfmkwuplenan_mvl, VE::pvfmkwlolenan_mvl}}, + + {VE::pvfmkwgt_MvMl, {VE::pvfmkwupgt_mvml, VE::pvfmkwlogt_mvml}}, + {VE::pvfmkwlt_MvMl, {VE::pvfmkwuplt_mvml, VE::pvfmkwlolt_mvml}}, + {VE::pvfmkwne_MvMl, {VE::pvfmkwupne_mvml, VE::pvfmkwlone_mvml}}, + {VE::pvfmkweq_MvMl, {VE::pvfmkwupeq_mvml, VE::pvfmkwloeq_mvml}}, + {VE::pvfmkwge_MvMl, {VE::pvfmkwupge_mvml, VE::pvfmkwloge_mvml}}, + {VE::pvfmkwle_MvMl, {VE::pvfmkwuple_mvml, VE::pvfmkwlole_mvml}}, + {VE::pvfmkwnum_MvMl, {VE::pvfmkwupnum_mvml, VE::pvfmkwlonum_mvml}}, + {VE::pvfmkwnan_MvMl, {VE::pvfmkwupnan_mvml, VE::pvfmkwlonan_mvml}}, + {VE::pvfmkwgtnan_MvMl, {VE::pvfmkwupgtnan_mvml, VE::pvfmkwlogtnan_mvml}}, + {VE::pvfmkwltnan_MvMl, {VE::pvfmkwupltnan_mvml, VE::pvfmkwloltnan_mvml}}, + {VE::pvfmkwnenan_MvMl, {VE::pvfmkwupnenan_mvml, VE::pvfmkwlonenan_mvml}}, + {VE::pvfmkweqnan_MvMl, {VE::pvfmkwupeqnan_mvml, VE::pvfmkwloeqnan_mvml}}, + 
{VE::pvfmkwgenan_MvMl, {VE::pvfmkwupgenan_mvml, VE::pvfmkwlogenan_mvml}}, + {VE::pvfmkwlenan_MvMl, {VE::pvfmkwuplenan_mvml, VE::pvfmkwlolenan_mvml}}, + + {VE::pvfmksgt_Mvl, {VE::pvfmksupgt_mvl, VE::pvfmkslogt_mvl}}, + {VE::pvfmksgt_MvMl, {VE::pvfmksupgt_mvml, VE::pvfmkslogt_mvml}}, + {VE::pvfmkslt_Mvl, {VE::pvfmksuplt_mvl, VE::pvfmkslolt_mvl}}, + {VE::pvfmksne_Mvl, {VE::pvfmksupne_mvl, VE::pvfmkslone_mvl}}, + {VE::pvfmkseq_Mvl, {VE::pvfmksupeq_mvl, VE::pvfmksloeq_mvl}}, + {VE::pvfmksge_Mvl, {VE::pvfmksupge_mvl, VE::pvfmksloge_mvl}}, + {VE::pvfmksle_Mvl, {VE::pvfmksuple_mvl, VE::pvfmkslole_mvl}}, + {VE::pvfmksnum_Mvl, {VE::pvfmksupnum_mvl, VE::pvfmkslonum_mvl}}, + {VE::pvfmksnan_Mvl, {VE::pvfmksupnan_mvl, VE::pvfmkslonan_mvl}}, + {VE::pvfmksgtnan_Mvl, {VE::pvfmksupgtnan_mvl, VE::pvfmkslogtnan_mvl}}, + {VE::pvfmksltnan_Mvl, {VE::pvfmksupltnan_mvl, VE::pvfmksloltnan_mvl}}, + {VE::pvfmksnenan_Mvl, {VE::pvfmksupnenan_mvl, VE::pvfmkslonenan_mvl}}, + {VE::pvfmkseqnan_Mvl, {VE::pvfmksupeqnan_mvl, VE::pvfmksloeqnan_mvl}}, + {VE::pvfmksgenan_Mvl, {VE::pvfmksupgenan_mvl, VE::pvfmkslogenan_mvl}}, + {VE::pvfmkslenan_Mvl, {VE::pvfmksuplenan_mvl, VE::pvfmkslolenan_mvl}}, + + {VE::pvfmksgt_MvMl, {VE::pvfmksupgt_mvml, VE::pvfmkslogt_mvml}}, + {VE::pvfmkslt_MvMl, {VE::pvfmksuplt_mvml, VE::pvfmkslolt_mvml}}, + {VE::pvfmksne_MvMl, {VE::pvfmksupne_mvml, VE::pvfmkslone_mvml}}, + {VE::pvfmkseq_MvMl, {VE::pvfmksupeq_mvml, VE::pvfmksloeq_mvml}}, + {VE::pvfmksge_MvMl, {VE::pvfmksupge_mvml, VE::pvfmksloge_mvml}}, + {VE::pvfmksle_MvMl, {VE::pvfmksuple_mvml, VE::pvfmkslole_mvml}}, + {VE::pvfmksnum_MvMl, {VE::pvfmksupnum_mvml, VE::pvfmkslonum_mvml}}, + {VE::pvfmksnan_MvMl, {VE::pvfmksupnan_mvml, VE::pvfmkslonan_mvml}}, + {VE::pvfmksgtnan_MvMl, {VE::pvfmksupgtnan_mvml, VE::pvfmkslogtnan_mvml}}, + {VE::pvfmksltnan_MvMl, {VE::pvfmksupltnan_mvml, VE::pvfmksloltnan_mvml}}, + {VE::pvfmksnenan_MvMl, {VE::pvfmksupnenan_mvml, VE::pvfmkslonenan_mvml}}, + {VE::pvfmkseqnan_MvMl, {VE::pvfmksupeqnan_mvml, VE::pvfmksloeqnan_mvml}}, + {VE::pvfmksgenan_MvMl, {VE::pvfmksupgenan_mvml, VE::pvfmkslogenan_mvml}}, + {VE::pvfmkslenan_MvMl, {VE::pvfmksuplenan_mvml, VE::pvfmkslolenan_mvml}}, + }; + + unsigned Opcode = MI.getOpcode(); + + if (map.find(Opcode) == map.end()) { + report_fatal_error("unexpected opcode for pvfmk"); + } + + unsigned OpcodeUpper = map[Opcode][0]; + unsigned OpcodeLower = map[Opcode][1]; + + MachineBasicBlock *MBB = MI.getParent(); + DebugLoc dl = MI.getDebugLoc(); + MachineInstrBuilder Bu = BuildMI(*MBB, MI, dl, TI.get(OpcodeUpper)); + MachineInstrBuilder Bl = BuildMI(*MBB, MI, dl, TI.get(OpcodeLower)); + + // VM512 + Bu.addReg(GetVM512Upper(MI.getOperand(0).getReg())); + Bl.addReg(GetVM512Lower(MI.getOperand(0).getReg())); + + if (MI.getNumOperands() == 2) { // _Ml: VM512, VL + // VL + Bu.addReg(MI.getOperand(1).getReg()); + Bl.addReg(MI.getOperand(1).getReg()); + } else if (MI.getNumOperands() == 3) { // _Mvl: VM512, VR, VL + // VR + Bu.addReg(MI.getOperand(1).getReg()); + Bl.addReg(MI.getOperand(1).getReg()); + // VL + Bu.addReg(MI.getOperand(2).getReg()); + Bl.addReg(MI.getOperand(2).getReg()); + } else if (MI.getNumOperands() == 4) { // _MvMl: VM512, VR, VM512, VL + // VR + Bu.addReg(MI.getOperand(1).getReg()); + Bl.addReg(MI.getOperand(1).getReg()); + // VM512 + Bu.addReg(GetVM512Upper(MI.getOperand(2).getReg())); + Bl.addReg(GetVM512Lower(MI.getOperand(2).getReg())); + // VL + Bu.addReg(MI.getOperand(3).getReg()); + Bl.addReg(MI.getOperand(3).getReg()); + } else { + report_fatal_error("unexpected number of 
operands for pvfmk"); + } + + MI.eraseFromParent(); +} + +bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { + switch (MI.getOpcode()) { + case VE::EXTEND_STACK: { + return expandExtendStackPseudo(MI); + } + case VE::EXTEND_STACK_GUARD: { + MI.eraseFromParent(); // The pseudo instruction is gone now. + return true; + } + case TargetOpcode::LOAD_STACK_GUARD: { + assert(Subtarget.isTargetLinux() && + "Only Linux target is expected to contain LOAD_STACK_GUARD"); + report_fatal_error( + "expandPostRAPseudo for LOAD_STACK_GUARD is not implemented yet"); + } + case VE::GETSTACKTOP: { + return expandGetStackTopPseudo(MI); + } +#if 0 + case VE::VE_SELECT: { + // (VESelect $dst, $CC, $condVal, $trueVal, $dst) + // -> (CMOVrr $dst, condCode, $trueVal, $condVal) + // cmov.$df.$cf $dst, $trueval, $cond + + assert(MI.getOperand(0).getReg() == MI.getOperand(4).getReg()); + + MachineBasicBlock* MBB = MI.getParent(); + DebugLoc dl = MI.getDebugLoc(); + BuildMI(*MBB, MI, dl, get(VE::CMOVWrr)) + .addReg(MI.getOperand(0).getReg()) + .addImm(MI.getOperand(1).getImm()) + .addReg(MI.getOperand(3).getReg()) + .addReg(MI.getOperand(2).getReg()); + + MI.eraseFromParent(); + return true; + } +#endif + + case VE::andm_MMM: + buildVMRInst(MI, get(VE::andm_mmm)); + return true; + case VE::orm_MMM: + buildVMRInst(MI, get(VE::orm_mmm)); + return true; + case VE::xorm_MMM: + buildVMRInst(MI, get(VE::xorm_mmm)); + return true; + case VE::eqvm_MMM: + buildVMRInst(MI, get(VE::eqvm_mmm)); + return true; + case VE::nndm_MMM: + buildVMRInst(MI, get(VE::nndm_mmm)); + return true; + case VE::negm_MM: + buildVMRInst(MI, get(VE::negm_mm)); + return true; + + case VE::lvm_MMIs: { + unsigned VMXu = GetVM512Upper(MI.getOperand(0).getReg()); + unsigned VMXl = GetVM512Lower(MI.getOperand(0).getReg()); + unsigned VMDu = GetVM512Upper(MI.getOperand(1).getReg()); + unsigned VMDl = GetVM512Upper(MI.getOperand(1).getReg()); + int64_t imm = MI.getOperand(2).getImm(); + unsigned VMX = VMXl; + unsigned VMD = VMDl; + if (imm >= 4) { + VMX = VMXu; + VMD = VMDu; + imm -= 4; + } + MachineBasicBlock *MBB = MI.getParent(); + DebugLoc dl = MI.getDebugLoc(); + BuildMI(*MBB, MI, dl, get(VE::lvm_mmIs)) + .addDef(VMX) + .addReg(VMD) + .addImm(imm) + .addReg(MI.getOperand(3).getReg()); + MI.eraseFromParent(); + return true; + } + + case VE::svm_sMI: { + unsigned VMZu = GetVM512Upper(MI.getOperand(1).getReg()); + unsigned VMZl = GetVM512Lower(MI.getOperand(1).getReg()); + int64_t imm = MI.getOperand(2).getImm(); + unsigned VMZ = VMZl; + if (imm >= 4) { + VMZ = VMZu; + imm -= 4; + } + MachineBasicBlock *MBB = MI.getParent(); + DebugLoc dl = MI.getDebugLoc(); + BuildMI(*MBB, MI, dl, get(VE::svm_smI)) + .add(MI.getOperand(0)) + .addReg(VMZ) + .addImm(imm); + MI.eraseFromParent(); + return true; + } + case VE::pvfmkat_Ml: + case VE::pvfmkaf_Ml: + case VE::pvfmkwgt_Mvl: + case VE::pvfmkwgt_MvMl: + case VE::pvfmkwlt_Mvl: + case VE::pvfmkwlt_MvMl: + case VE::pvfmkwne_Mvl: + case VE::pvfmkwne_MvMl: + case VE::pvfmkweq_Mvl: + case VE::pvfmkweq_MvMl: + case VE::pvfmkwge_Mvl: + case VE::pvfmkwge_MvMl: + case VE::pvfmkwle_Mvl: + case VE::pvfmkwle_MvMl: + case VE::pvfmkwnum_Mvl: + case VE::pvfmkwnum_MvMl: + case VE::pvfmkwnan_Mvl: + case VE::pvfmkwnan_MvMl: + case VE::pvfmkwgtnan_Mvl: + case VE::pvfmkwgtnan_MvMl: + case VE::pvfmkwltnan_Mvl: + case VE::pvfmkwltnan_MvMl: + case VE::pvfmkwnenan_Mvl: + case VE::pvfmkwnenan_MvMl: + case VE::pvfmkweqnan_Mvl: + case VE::pvfmkweqnan_MvMl: + case VE::pvfmkwgenan_Mvl: + case VE::pvfmkwgenan_MvMl: + case 
VE::pvfmkwlenan_Mvl: + case VE::pvfmkwlenan_MvMl: + case VE::pvfmksgt_Mvl: + case VE::pvfmksgt_MvMl: + case VE::pvfmkslt_Mvl: + case VE::pvfmkslt_MvMl: + case VE::pvfmksne_Mvl: + case VE::pvfmksne_MvMl: + case VE::pvfmkseq_Mvl: + case VE::pvfmkseq_MvMl: + case VE::pvfmksge_Mvl: + case VE::pvfmksge_MvMl: + case VE::pvfmksle_Mvl: + case VE::pvfmksle_MvMl: + case VE::pvfmksnum_Mvl: + case VE::pvfmksnum_MvMl: + case VE::pvfmksnan_Mvl: + case VE::pvfmksnan_MvMl: + case VE::pvfmksgtnan_Mvl: + case VE::pvfmksgtnan_MvMl: + case VE::pvfmksltnan_Mvl: + case VE::pvfmksltnan_MvMl: + case VE::pvfmksnenan_Mvl: + case VE::pvfmksnenan_MvMl: + case VE::pvfmkseqnan_Mvl: + case VE::pvfmkseqnan_MvMl: + case VE::pvfmksgenan_Mvl: + case VE::pvfmksgenan_MvMl: + case VE::pvfmkslenan_Mvl: + case VE::pvfmkslenan_MvMl: { + expandPseudoVFMK_VL(*this, MI); + return true; + } + } + return false; +} + +bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const { + MachineBasicBlock &MBB = *MI.getParent(); + MachineFunction &MF = *MBB.getParent(); + const VEInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + DebugLoc dl = MBB.findDebugLoc(MI); + + // Create following instructions and multiple basic blocks. + // + // thisBB: + // brge.l.t %sp, %sl, sinkBB + // syscallBB: + // ld %s61, 0x18(, %tp) // load param area + // or %s62, 0, %s0 // spill the value of %s0 + // lea %s63, 0x13b // syscall # of grow + // shm.l %s63, 0x0(%s61) // store syscall # at addr:0 + // shm.l %sl, 0x8(%s61) // store old limit at addr:8 + // shm.l %sp, 0x10(%s61) // store new limit at addr:16 + // monc // call monitor + // or %s0, 0, %s62 // restore the value of %s0 + // sinkBB: + + // Create new MBB + MachineBasicBlock *BB = &MBB; + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB); + MachineFunction::iterator It = ++(BB->getIterator()); + MF.insert(It, syscallMBB); + MF.insert(It, sinkMBB); + + // Transfer the remainder of BB and its successor edges to sinkMBB. + sinkMBB->splice(sinkMBB->begin(), BB, + std::next(std::next(MachineBasicBlock::iterator(MI))), + BB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(BB); + + // Next, add the true and fallthrough blocks as its successors. + BB->addSuccessor(syscallMBB); + BB->addSuccessor(sinkMBB); + BuildMI(BB, dl, TII.get(VE::BCRLrr)) + .addImm(VECC::CC_IGE) + .addReg(VE::SX11) // %sp + .addReg(VE::SX8) // %sl + .addMBB(sinkMBB); + + BB = syscallMBB; + + // Update machine-CFG edges + BB->addSuccessor(sinkMBB); + + BuildMI(BB, dl, TII.get(VE::LDSri), VE::SX61).addReg(VE::SX14).addImm(0x18); + BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62).addReg(VE::SX0).addImm(0); + BuildMI(BB, dl, TII.get(VE::LEAzzi), VE::SX63).addImm(0x13b); + BuildMI(BB, dl, TII.get(VE::SHMri)) + .addReg(VE::SX61) + .addImm(0) + .addReg(VE::SX63); + BuildMI(BB, dl, TII.get(VE::SHMri)) + .addReg(VE::SX61) + .addImm(8) + .addReg(VE::SX8); + BuildMI(BB, dl, TII.get(VE::SHMri)) + .addReg(VE::SX61) + .addImm(16) + .addReg(VE::SX11); + BuildMI(BB, dl, TII.get(VE::MONC)); + + BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0).addReg(VE::SX62).addImm(0); + + MI.eraseFromParent(); // The pseudo instruction is gone now. 
+ return true; +} + +bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const { + MachineBasicBlock *MBB = MI.getParent(); + MachineFunction &MF = *MBB->getParent(); + const VEInstrInfo &TII = + *static_cast(MF.getSubtarget().getInstrInfo()); + DebugLoc dl = MBB->findDebugLoc(MI); + + // Create following instruction + // + // dst = %sp + stack_size + + const MachineFrameInfo &MFI = MF.getFrameInfo(); + + const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering(); + unsigned NumBytes = 176; + if (MFI.adjustsStack() && TFL->hasReservedCallFrame(MF)) + NumBytes += MFI.getMaxCallFrameSize(); + + BuildMI(*MBB, MI, dl, TII.get(VE::LEArzi)) + .addDef(MI.getOperand(0).getReg()) + .addReg(VE::SX11) + .addImm(NumBytes); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return true; +} diff --git a/llvm/lib/Target/VE/VEInstrInfo.td b/llvm/lib/Target/VE/VEInstrInfo.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEInstrInfo.td @@ -0,0 +1,2280 @@ +//===-- VEInstrInfo.td - Target Description for VE Target -----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the VE instructions in TableGen format. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Instruction format superclass +//===----------------------------------------------------------------------===// + +include "VEInstrFormats.td" + +//===----------------------------------------------------------------------===// +// Feature predicates. 
+//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Instruction Pattern Stuff +//===----------------------------------------------------------------------===// + +def simm7 : PatLeaf<(imm), [{ return isInt<7>(N->getSExtValue()); }]>; +def simm32 : PatLeaf<(imm), [{ return isInt<32>(N->getSExtValue()); }]>; +def uimm32 : PatLeaf<(imm), [{ return isUInt<32>(N->getZExtValue()); }]>; +def uimm6 : PatLeaf<(imm), [{ return isUInt<6>(N->getZExtValue()); }]>; +def uimm7 : PatLeaf<(imm), [{ return isUInt<7>(N->getZExtValue()); }]>; +def zero : PatLeaf<(imm), [{ return N->getSExtValue() == 0; }]>; +def lomsbzero : PatLeaf<(imm), [{ return (N->getZExtValue() & 0x80000000) + == 0; }]>; +def lozero : PatLeaf<(imm), [{ return (N->getZExtValue() & 0xffffffff) + == 0; }]>; +def fplomsbzero : PatLeaf<(fpimm), [{ return (N->getValueAPF().bitcastToAPInt() + .getZExtValue() & 0x80000000) == 0; }]>; +def fplozero : PatLeaf<(fpimm), [{ return (N->getValueAPF().bitcastToAPInt() + .getZExtValue() & 0xffffffff) == 0; }]>; + +def CCSIOp : PatLeaf<(cond), [{ + switch (N->get()) { + default: return true; + case ISD::SETULT: + case ISD::SETULE: + case ISD::SETUGT: + case ISD::SETUGE: return false; + } +}]>; + +def CCUIOp : PatLeaf<(cond), [{ + switch (N->get()) { + default: return true; + case ISD::SETLT: + case ISD::SETLE: + case ISD::SETGT: + case ISD::SETGE: return false; + } +}]>; + +def GetVL : SDNodeXFormgetMachineFunction(); + unsigned VLReg = MF.getSubtarget().getInstrInfo()->getVectorLengthReg(&MF); + return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(N), VLReg, MVT::i32); +}]>; + +def LOFP32 : SDNodeXFormgetValueAPF().bitcastToAPInt(); + return CurDAG->getTargetConstant((unsigned)(imm.getZExtValue() & 0xffffffff), + SDLoc(N), MVT::i64); +}]>; + +def HIFP32 : SDNodeXFormgetValueAPF().bitcastToAPInt(); + return CurDAG->getTargetConstant((unsigned)(imm.getZExtValue() >> 32), + SDLoc(N), MVT::i64); +}]>; + +def LO32 : SDNodeXFormgetTargetConstant((unsigned)(N->getZExtValue() & 0xffffffff), + SDLoc(N), MVT::i64); +}]>; + +def HI32 : SDNodeXFormgetTargetConstant((unsigned)(N->getZExtValue() >> 32), + SDLoc(N), MVT::i64); +}]>; + +def LEASLimm : PatLeaf<(imm), [{ + return isShiftedUInt<32, 32>(N->getZExtValue()); +}], HI32>; + +def trunc_imm : SDNodeXFormgetTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32); +}]>; + +def sext_imm : SDNodeXFormgetTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64); +}]>; + +def zext_imm : SDNodeXFormgetTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i64); +}]>; + +def icond2cc : SDNodeXFormget()) { + default: llvm_unreachable("Unknown integer condition code!"); + case ISD::SETEQ: cc = VECC::CC_IEQ; break; + case ISD::SETNE: cc = VECC::CC_INE; break; + case ISD::SETLT: cc = VECC::CC_IL; break; + case ISD::SETGT: cc = VECC::CC_IG; break; + case ISD::SETLE: cc = VECC::CC_ILE; break; + case ISD::SETGE: cc = VECC::CC_IGE; break; + case ISD::SETULT: cc = VECC::CC_IL; break; + case ISD::SETULE: cc = VECC::CC_ILE; break; + case ISD::SETUGT: cc = VECC::CC_IG; break; + case ISD::SETUGE: cc = VECC::CC_IGE; break; + } + return CurDAG->getTargetConstant(cc, SDLoc(N), MVT::i32); +}]>; + +def fcond2cc : SDNodeXFormget()) { + default: llvm_unreachable("Unknown float condition code!"); + case ISD::SETFALSE: cc = VECC::CC_AF; break; + case ISD::SETEQ: + case ISD::SETOEQ: cc = VECC::CC_EQ; break; + case ISD::SETNE: + case ISD::SETONE: cc = VECC::CC_NE; break; + case 
ISD::SETLT: + case ISD::SETOLT: cc = VECC::CC_L; break; + case ISD::SETGT: + case ISD::SETOGT: cc = VECC::CC_G; break; + case ISD::SETLE: + case ISD::SETOLE: cc = VECC::CC_LE; break; + case ISD::SETGE: + case ISD::SETOGE: cc = VECC::CC_GE; break; + case ISD::SETO: cc = VECC::CC_NUM; break; + case ISD::SETUO: cc = VECC::CC_NAN; break; + case ISD::SETUEQ: cc = VECC::CC_EQNAN; break; + case ISD::SETUNE: cc = VECC::CC_NENAN; break; + case ISD::SETULT: cc = VECC::CC_LNAN; break; + case ISD::SETUGT: cc = VECC::CC_GNAN; break; + case ISD::SETULE: cc = VECC::CC_LENAN; break; + case ISD::SETUGE: cc = VECC::CC_GENAN; break; + case ISD::SETTRUE: cc = VECC::CC_AT; break; + } + return CurDAG->getTargetConstant(cc, SDLoc(N), MVT::i32); +}]>; + +// Addressing modes. +def ADDRrr : ComplexPattern; +def ADDRri : ComplexPattern; + +// Address operands +def VEMEMrrAsmOperand : AsmOperandClass { + let Name = "MEMrr"; + let ParserMethod = "parseMEMOperand"; +} + +def VEMEMriAsmOperand : AsmOperandClass { + let Name = "MEMri"; + let ParserMethod = "parseMEMOperand"; +} + +// ASX format of memory address + +def MEMrr : Operand { + let PrintMethod = "printMemASXOperand"; + let MIOperandInfo = (ops ptr_rc, ptr_rc); + let ParserMatchClass = VEMEMrrAsmOperand; +} + +def MEMri : Operand { + let PrintMethod = "printMemASXOperand"; + let MIOperandInfo = (ops ptr_rc, i64imm); + let ParserMatchClass = VEMEMriAsmOperand; +} + +// AS format of memory address + +def MEMASri : Operand { + let PrintMethod = "printMemASOperand"; + let MIOperandInfo = (ops ptr_rc, i64imm); + let ParserMatchClass = VEMEMriAsmOperand; +} + +// Branch targets have OtherVT type. +def brtarget32 : Operand { + let EncoderMethod = "getBranchTarget32OpValue"; +} + +def TLSSym : Operand; + +// Branch targets have OtherVT type. +def brtarget : Operand { + let EncoderMethod = "getBranchTargetOpValue"; +} + +def calltarget : Operand { + let EncoderMethod = "getCallTargetOpValue"; + let DecoderMethod = "DecodeCall"; +} + +def simm7Op32 : Operand { + let DecoderMethod = "DecodeSIMM7"; +} + +def simm7Op64 : Operand { + let DecoderMethod = "DecodeSIMM7"; +} + +def simm7Op128 : Operand { + let DecoderMethod = "DecodeSIMM7"; +} + +def simm32Op32 : Operand { + let DecoderMethod = "DecodeSIMM32"; +} + +def simm32Op64 : Operand { + let DecoderMethod = "DecodeSIMM32"; +} + +def uimm7Op32 : Operand { + let DecoderMethod = "DecodeUIMM6"; +} + +def uimm6Op32 : Operand { + let DecoderMethod = "DecodeUIMM6"; +} + +def uimm6Op64 : Operand { + let DecoderMethod = "DecodeUIMM6"; +} + +def uimm6Op128 : Operand { + let DecoderMethod = "DecodeUIMM6"; +} + +// Operand for printing out a condition code. +let PrintMethod = "printCCOperand" in + def CCOp : Operand; + +def VEhi : SDNode<"VEISD::Hi", SDTIntUnaryOp>; +def VElo : SDNode<"VEISD::Lo", SDTIntUnaryOp>; + +// These are target-independent nodes, but have target-specific formats. 
+def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i64>, + SDTCisVT<1, i64> ]>; +def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i64>, + SDTCisVT<1, i64> ]>; + +def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart, + [SDNPHasChain, SDNPOutGlue]>; +def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; + +def SDT_SPCall : SDTypeProfile<0, -1, [SDTCisVT<0, i64>]>; +def call : SDNode<"VEISD::CALL", SDT_SPCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; + +def retflag : SDNode<"VEISD::RET_FLAG", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; + +def getGOT : Operand { + let PrintMethod = "printGetGOT"; +} +def getFunPLT : Operand { + let PrintMethod = "printGetFunPLT"; +} + +def VEmax : SDNode<"VEISD::MAX", SDTIntBinOp>; +def VEmin : SDNode<"VEISD::MIN", SDTIntBinOp>; +def VEfmax : SDNode<"VEISD::FMAX", SDTFPBinOp>; +def VEfmin : SDNode<"VEISD::FMIN", SDTFPBinOp>; + +def VEeh_sjlj_setjmp: SDNode<"VEISD::EH_SJLJ_SETJMP", + SDTypeProfile<1, 1, [SDTCisInt<0>, + SDTCisPtrTy<1>]>, + [SDNPHasChain, SDNPSideEffect]>; +def VEeh_sjlj_longjmp: SDNode<"VEISD::EH_SJLJ_LONGJMP", + SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>, + [SDNPHasChain, SDNPSideEffect]>; +def VEeh_sjlj_setup_dispatch: SDNode<"VEISD::EH_SJLJ_SETUP_DISPATCH", + SDTypeProfile<0, 0, []>, + [SDNPHasChain, SDNPSideEffect]>; + +// GETFUNPLT for PIC +def GetFunPLT : SDNode<"VEISD::GETFUNPLT", SDTIntUnaryOp>; + +// GETTLSADDR for TLS +def GetTLSAddr : SDNode<"VEISD::GETTLSADDR", SDT_SPCall, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, + SDNPVariadic]>; + +// GETSTACKTOP +def GetStackTop : SDNode<"VEISD::GETSTACKTOP", SDTNone, + [SDNPHasChain, SDNPSideEffect]>; + +// MEMBARRIER +def MemBarrier : SDNode<"VEISD::MEMBARRIER", SDTNone, + [SDNPHasChain, SDNPSideEffect]>; + +//===----------------------------------------------------------------------===// +// VE Flag Conditions +//===----------------------------------------------------------------------===// + +// Note that these values must be kept in sync with the CCOp::CondCode enum +// values. 
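Note: the icond2cc and fcond2cc transforms above emit these codes as plain i32 immediates, so the TableGen values listed below have to track the backend's condition-code enum one-for-one. As a reference point, here is a minimal sketch of that enum as implied by the values below; the enum and namespace names are assumptions (the C++ parts of this patch refer to the constants as VECC::CC_*):

namespace VECC {
enum CondCode {
  CC_IG    = 0,  // Greater
  CC_IL    = 1,  // Less
  CC_INE   = 2,  // Not Equal
  CC_IEQ   = 3,  // Equal
  CC_IGE   = 4,  // Greater or Equal
  CC_ILE   = 5,  // Less or Equal
  CC_AF    = 6,  // Always false
  CC_G     = 7,  // Greater (floating point)
  CC_L     = 8,  // Less (floating point)
  CC_NE    = 9,  // Not Equal
  CC_EQ    = 10, // Equal
  CC_GE    = 11, // Greater or Equal
  CC_LE    = 12, // Less or Equal
  CC_NUM   = 13, // Number (ordered)
  CC_NAN   = 14, // NaN (unordered)
  CC_GNAN  = 15, // Greater or NaN
  CC_LNAN  = 16, // Less or NaN
  CC_NENAN = 17, // Not Equal or NaN
  CC_EQNAN = 18, // Equal or NaN
  CC_GENAN = 19, // Greater or Equal or NaN
  CC_LENAN = 20, // Less or Equal or NaN
  CC_AT    = 21, // Always true
};
} // namespace VECC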
+class CC_VAL : PatLeaf<(i32 N)>; +def CC_IG : CC_VAL< 0>; // Greater +def CC_IL : CC_VAL< 1>; // Less +def CC_INE : CC_VAL< 2>; // Not Equal +def CC_IEQ : CC_VAL< 3>; // Equal +def CC_IGE : CC_VAL< 4>; // Greater or Equal +def CC_ILE : CC_VAL< 5>; // Less or Equal +def CC_AF : CC_VAL< 6>; // Always false +def CC_G : CC_VAL< 7>; // Greater +def CC_L : CC_VAL< 8>; // Less +def CC_NE : CC_VAL< 9>; // Not Equal +def CC_EQ : CC_VAL<10>; // Equal +def CC_GE : CC_VAL<11>; // Greater or Equal +def CC_LE : CC_VAL<12>; // Less or Equal +def CC_NUM : CC_VAL<13>; // Number +def CC_NAN : CC_VAL<14>; // NaN +def CC_GNAN : CC_VAL<15>; // Greater or NaN +def CC_LNAN : CC_VAL<16>; // Less or NaN +def CC_NENAN : CC_VAL<17>; // Not Equal or NaN +def CC_EQNAN : CC_VAL<18>; // Equal or NaN +def CC_GENAN : CC_VAL<19>; // Greater or Equal or NaN +def CC_LENAN : CC_VAL<20>; // Less or Equal or NaN +def CC_AT : CC_VAL<21>; // Always true + +//===----------------------------------------------------------------------===// +// VE Multiclasses for common instruction formats +//===----------------------------------------------------------------------===// + +multiclass RMmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> { + def rri : RM< + opc, (outs RC:$sx), (ins RC:$sy, RC:$sz, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}($sy, ${sz})"), + [(set Ty:$sx, (OpNode (OpNode Ty:$sy, Ty:$sz), (Ty simm32:$imm32)))]> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def rii : RM< + opc, (outs RC:$sx), (ins RC:$sz, immOp:$sy, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}($sy, ${sz})"), + [/* Not define DAG pattern here to avoid llvm uses LEArii for add + instructions. + (set Ty:$sx, (OpNode (OpNode Ty:$sz, (Ty simm7:$sy)), (Ty simm32:$imm32)))*/]> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def rzi : RM< + opc, (outs RC:$sx), (ins RC:$sz, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}(${sz})"), + [(set Ty:$sx, (OpNode Ty:$sz, (Ty simm32:$imm32)))]> { + let cy = 0; + let sy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def zii : RM< + opc, (outs RC:$sx), (ins immOp:$sy, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}(${sy})"), + [/* Not define DAG pattern here to avoid llvm uses LEAzii for all add + instructions. + (set Ty:$sx, (OpNode (Ty simm7:$sy), (Ty simm32:$imm32))) */]> { + let cy = 0; + let cz = 0; + let sz = 0; + let hasSideEffects = 0; + } + def zzi : RM< + opc, (outs RC:$sx), (ins immOp2:$imm32), + !strconcat(opcStr, " $sx, $imm32"), + [/* Not define DAG pattern here to avoid llvm uses LEAzzi for all set + instructions. + (set Ty:$sx, (Ty simm32:$imm32)) */]> { + let cy = 0; + let sy = 0; + let cz = 0; + let sz = 0; + let hasSideEffects = 0; + } +} + +// RRNDm is similar to RRm without DAG patterns. 
+ +multiclass RMNDmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> { + def rri : RM< + opc, (outs RC:$sx), (ins RC:$sy, RC:$sz, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}($sy, ${sz})"), []> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def rii : RM< + opc, (outs RC:$sx), (ins RC:$sz, immOp:$sy, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}($sy, ${sz})"), []> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def rzi : RM< + opc, (outs RC:$sx), (ins RC:$sz, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}(${sz})"), []> { + let cy = 0; + let sy = 0; + let hasSideEffects = 0; + let cz = 1; + } + def zii : RM< + opc, (outs RC:$sx), (ins immOp:$sy, immOp2:$imm32), + !strconcat(opcStr, " $sx, ${imm32}(${sy})"), []> { + let cy = 0; + let cz = 0; + let sz = 0; + let hasSideEffects = 0; + } + def zzi : RM< + opc, (outs RC:$sx), (ins immOp2:$imm32), + !strconcat(opcStr, " $sx, $imm32"), []> { + let cy = 0; + let sy = 0; + let cz = 0; + let sz = 0; + let hasSideEffects = 0; + } +} + +let Constraints = "$sx = $sd", DisableEncoding = "$sd" in +multiclass RRCASmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> { + def asr : RM< + opc, (outs RC:$sx), (ins MEMASri:$addr, RC:$sy, RC:$sd), + !strconcat(opcStr, " $sx, $addr, $sy"), []> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def asi : RM< + opc, (outs RC:$sx), (ins MEMASri:$addr, immOp:$sy, RC:$sd), + !strconcat(opcStr, " $sx, $addr, $sy"), []> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def rir : RM< + opc, (outs RC:$sx), (ins RC:$sz, immOp2:$imm32, RC:$sy, RC:$sd), + !strconcat(opcStr, " $sx, ${imm32}(${sz}), $sy"), []> { + let cy = 1; + let cz = 1; + let isCodeGenOnly = 1; + let hasSideEffects = 0; + } + def rii : RM< + opc, (outs RC:$sx), (ins RC:$sz, immOp2:$imm32, immOp:$sy, RC:$sd), + !strconcat(opcStr, " $sx, ${imm32}(${sz}), $sy"), []> { + let cy = 0; + let cz = 1; + let isCodeGenOnly = 1; + let hasSideEffects = 0; + } + def zii : RM< + opc, (outs RC:$sx), (ins immOp2:$imm32, immOp:$sy, RC:$sd), + !strconcat(opcStr, " $sx, $imm32, $sy"), []> { + let cy = 0; + let cz = 0; + let sz = 0; + let hasSideEffects = 0; + } +} + +// Multiclass for RR type instructions + +// First, defines components +// Named like RRm if each has their own DAG pattern +// Named like RRNDm if each doesn't have their own DAG pattern + +multiclass RRmrropc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi> { + def rr : RR + { let cy = 1; let cz = 1; let hasSideEffects = 0; } +} + +multiclass RRNDmrropc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi> { + def rr : RR + { let cy = 1; let cz = 1; let hasSideEffects = 0; } +} + +multiclass RRmriopc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, Operand immOp> { + // VE calculates (OpNode $sy, $sz), but llvm requires to have immediate + // in RHS, so we use following definition. 
+ def ri : RR + { let cy = 0; let cz = 1; let hasSideEffects = 0; } +} + +multiclass RRmiropc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, Operand immOp> { + def ri : RR + { let cy = 0; let cz = 1; let hasSideEffects = 0; } +} + +multiclass RRNDmiropc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, Operand immOp> { + def ri : RR + { let cy = 0; let cz = 1; let hasSideEffects = 0; } +} + +multiclass RRmizopc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, Operand immOp> { + def zi : RR + { let cy = 0; let cz = 0; let sz = 0; let hasSideEffects = 0; } +} + +multiclass RRNDmizopc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, Operand immOp> { + def zi : RR + { let cy = 0; let cz = 0; let sz = 0; let hasSideEffects = 0; } +} + +multiclass RRNDmrmopc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, Operand immOp2> { + def rm0 : RR { + let cy = 1; + let cz = 0; + let sz{6} = 1; + // (guess) tblgen conservatively assumes hasSideEffects when + // it fails to infer from a pattern. + let hasSideEffects = 0; + } + def rm1 : RR { + let cy = 1; + let cz = 0; + let hasSideEffects = 0; + } +} + +multiclass RRNDmimopc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, + Operand immOp, Operand immOp2> { + def im0 : RR { + let cy = 0; + let cz = 0; + let sz{6} = 1; + // (guess) tblgen conservatively assumes hasSideEffects when + // it fails to infer from a pattern. + let hasSideEffects = 0; + } + def im1 : RR { + let cy = 0; + let cz = 0; + let hasSideEffects = 0; + } +} + +// Used by add, mul, div, and similar commutative instructions +// The order of operands are "$sx, $sy, $sz" + +multiclass RRmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> : + RRmrr, + RRmri, + RRmiz, + RRNDmrm, + RRNDmim; + +// Used by sub, and similar not commutative instructions +// The order of operands are "$sx, $sy, $sz" + +multiclass RRNCmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> : + RRmrr, + RRmir, + RRmiz, + RRNDmrm, + RRNDmim; + +// Used by fadd, fsub, and similar floating point instructions +// The order of operands are "$sx, $sy, $sz" + +multiclass RRFmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> : + RRmrr, + RRNDmir, + RRNDmiz, + RRNDmrm, + RRNDmim; + +// Used by cmp instruction +// The order of operands are "$sx, $sy, $sz" + +multiclass RRNDmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, + Operand immOp, Operand immOp2> : + RRNDmrr, + RRNDmir, + RRNDmiz, + RRNDmrm, + RRNDmim; + +// Used by fcq instruction like "F64 <- cmp F128, F128" +// The order of operands are "$sx, $sy, $sz" + +multiclass RRFCQmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, + Operand immOp, Operand immOp2> : + RRNDmrr, + RRNDmir, + RRNDmiz, + RRNDmrm, + RRNDmim; + +// Multiclass for RR type instructions +// Used by sra, sla, sll, and similar instructions +// The order of operands are "$sx, $sz, $sy" + +multiclass RRImopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> { + def rr : RR< + opc, (outs RC:$sx), (ins RC:$sz, I32:$sy), + !strconcat(opcStr, " $sx, $sz, $sy"), + [(set Ty:$sx, (OpNode Ty:$sz, i32:$sy))]> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def ri : RR< + opc, (outs RC:$sx), (ins RC:$sz, immOp:$sy), + !strconcat(opcStr, 
" $sx, $sz, $sy"), + [(set Ty:$sx, (OpNode Ty:$sz, (i32 simm7:$sy)))]> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def rm0 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy), + !strconcat(opcStr, " $sx, (${sz})0, $sy"), + []> { + let cy = 1; + let cz = 0; + let sz{6} = 1; + // (guess) tblgen conservatively assumes hasSideEffects when it fails to infer from a pattern. + let hasSideEffects = 0; + } + def rm1 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy), + !strconcat(opcStr, " $sx, (${sz})1, $sy"), + []> { + let cy = 1; + let cz = 0; + let hasSideEffects = 0; + } + def im0 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy), + !strconcat(opcStr, " $sx, (${sz})0, $sy"), + []> { + let cy = 0; + let cz = 0; + let sz{6} = 1; + let hasSideEffects = 0; + } + def im1 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy), + !strconcat(opcStr, " $sx, (${sz})1, $sy"), + []> { + let cy = 0; + let cz = 0; + let hasSideEffects = 0; + } + def zi : RR< + opc, (outs RC:$sx), (ins immOp:$sy), + !strconcat(opcStr, " $sx, $sy"), + [(set Ty:$sx, (OpNode 0, (i32 simm7:$sy)))]> { + let cy = 0; + let cz = 0; + let sz = 0; + let hasSideEffects = 0; + } +} + +// Multiclass for RR type instructions without dag pattern +// Used by sra.w.zx, sla.w.zx, and others + +multiclass RRINDmopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> { + def rr : RR< + opc, (outs RC:$sx), (ins RC:$sz, I32:$sy), + !strconcat(opcStr, " $sx, $sz, $sy"), + []> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def ri : RR< + opc, (outs RC:$sx), (ins RC:$sz, immOp:$sy), + !strconcat(opcStr, " $sx, $sz, $sy"), + []> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def rm0 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy), + !strconcat(opcStr, " $sx, (${sz})0, $sy"), + []> { + let cy = 1; + let cz = 0; + let sz{6} = 1; + // (guess) tblgen conservatively assumes hasSideEffects when it fails to infer from a pattern. 
+ let hasSideEffects = 0; + } + def rm1 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, I32:$sy), + !strconcat(opcStr, " $sx, (${sz})1, $sy"), + []> { + let cy = 1; + let cz = 0; + let hasSideEffects = 0; + } + def im0 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy), + !strconcat(opcStr, " $sx, (${sz})0, $sy"), + []> { + let cy = 0; + let cz = 0; + let sz{6} = 1; + let hasSideEffects = 0; + } + def im1 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz, immOp:$sy), + !strconcat(opcStr, " $sx, (${sz})1, $sy"), + []> { + let cy = 0; + let cz = 0; + let hasSideEffects = 0; + } + def zi : RR< + opc, (outs RC:$sx), (ins immOp:$sy), + !strconcat(opcStr, " $sx, $sy"), + []> { + let cy = 0; + let cz = 0; + let sz = 0; + let hasSideEffects = 0; + } +} + +// Multiclass for RR type instructions +// Used by cmov instruction + +let Constraints = "$sx = $sd", DisableEncoding = "$sd" in +multiclass RRCMOVmopc, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> { + def rr : RR< + opc, (outs I64:$sx), (ins CCOp:$cf, RC:$sy, I64:$sz, I64:$sd), + !strconcat(opcStr, " $sx, $sz, $sy"), + []> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def ri : RR< + opc, (outs I64:$sx), (ins CCOp:$cf, I64:$sz, immOp:$sy, I64:$sd), + !strconcat(opcStr, " $sx, $sz, $sy"), + []> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def rm0 : RR< + opc, (outs I64:$sx), (ins CCOp:$cf, RC:$sy, immOp2:$sz, I64:$sd), + !strconcat(opcStr, " $sx, (${sz})0, $sy"), + []> { + let cy = 1; + let cz = 0; + let sz{6} = 1; + // (guess) tblgen conservatively assumes hasSideEffects when it fails to infer from a pattern. + let hasSideEffects = 0; + } + def rm1 : RR< + opc, (outs I64:$sx), (ins CCOp:$cf, RC:$sy, immOp2:$sz, I64:$sd), + !strconcat(opcStr, " $sx, (${sz})1, $sy"), + []> { + let cy = 1; + let cz = 0; + let hasSideEffects = 0; + } + def im0 : RR< + opc, (outs I64:$sx), (ins CCOp:$cf, immOp:$sy, immOp2:$sz, I64:$sd), + !strconcat(opcStr, " $sx, (${sz})0, $sy"), + []> { + let cy = 0; + let cz = 0; + let sz{6} = 1; + let hasSideEffects = 0; + } + def im1 : RR< + opc, (outs I64:$sx), (ins CCOp:$cf, immOp:$sy, immOp2:$sz, I64:$sd), + !strconcat(opcStr, " $sx, (${sz})1, $sy"), + []> { + let cy = 0; + let cz = 0; + let hasSideEffects = 0; + } +} + +// Multiclass for RR type instructions with only 2 operands +// Used by pcnt, brv + +multiclass RRI2mopc, SDNode OpNode, + RegisterClass RC, ValueType Ty, Operand immOp2> { + def r : RR< + opc, (outs RC:$sx), (ins RC:$sz), + !strconcat(opcStr, " $sx, $sz"), + [(set Ty:$sx, (OpNode Ty:$sz))]> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def i : RR< + opc, (outs RC:$sx), (ins RC:$sz), + !strconcat(opcStr, " $sx, $sz"), + [(set Ty:$sx, (OpNode Ty:$sz))]> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def m0 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz), + !strconcat(opcStr, " $sx, (${sz})0"), + []> { + let cy = 1; + let cz = 0; + let sz{6} = 1; + // (guess) tblgen conservatively assumes hasSideEffects when it fails to infer from a pattern. 
+ let hasSideEffects = 0; + } + def m1 : RR< + opc, (outs RC:$sx), (ins immOp2:$sz), + !strconcat(opcStr, " $sx, (${sz})1"), + []> { + let cy = 1; + let cz = 0; + let hasSideEffects = 0; + } +} + + +// Branch multiclass +let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in +multiclass BCRm opc, + RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> { + def rr : CF< + opc, (outs), + (ins CCOp:$cf, RC:$sy, RC:$sz, brtarget32:$imm32), + !strconcat(opcStr, " $sy, $sz, $imm32"), []> { + let cy = 1; + let cz = 1; + let hasSideEffects = 0; + } + def ir : CF< + opc, (outs), + (ins CCOp:$cf, immOp:$sy, RC:$sz, brtarget32:$imm32), + !strconcat(opcStr, " $sy, $sz, $imm32"), []> { + let cy = 0; + let cz = 1; + let hasSideEffects = 0; + } + def rm0 : CF< + opc, (outs), (ins CCOp:$cf, RC:$sy, immOp2:$sz, brtarget32:$imm32), + !strconcat(opcStr, " $sy, (${sz})0, $imm32"), []> { + let cy = 1; + let cz = 0; + let sz{6} = 1; + // (guess) tblgen conservatively assumes hasSideEffects when it fails to infer from a pattern. + let hasSideEffects = 0; + } + def rm1 : CF< + opc, (outs), (ins CCOp:$cf, RC:$sy, immOp2:$sz, brtarget32:$imm32), + !strconcat(opcStr, " $sy, (${sz})1, $imm32"), []> { + let cy = 1; + let cz = 0; + let hasSideEffects = 0; + } + def im0 : CF< + opc, (outs), (ins CCOp:$cf, immOp:$sy, immOp2:$sz, brtarget32:$imm32), + !strconcat(opcStr, " $sy, (${sz})0, $imm32"), []> { + let cy = 0; + let cz = 0; + let sz{6} = 1; + let hasSideEffects = 0; + } + def im1 : CF< + opc, (outs), (ins CCOp:$cf, immOp:$sy, immOp2:$sz, brtarget32:$imm32), + !strconcat(opcStr, " $sy, (${sz})1, $imm32"), []> { + let cy = 0; + let cz = 0; + let hasSideEffects = 0; + } + def a : CF< + opc, (outs), (ins brtarget32:$imm32), + !strconcat(opcStrAt, " $imm32"), []> { + let cy = 0; + let sy = 0; + let cz = 0; + let sz = 0; + let cf = 15; /* AT */ + let isBarrier = 1; + let hasSideEffects = 0; + } +} + +// Multiclass for floating point conversion instructions. 
+// Used by CVS/CVD/FLT and others +multiclass CVTm opc, SDNode OpNode, + RegisterClass RCo, ValueType Tyo, + RegisterClass RCi, ValueType Tyi, Operand immOp> { + def r : RR { + let cy = 1; + let hasSideEffects = 0; + } + def i : RR { + let cy = 0; + let hasSideEffects = 0; + } +} + + +//===----------------------------------------------------------------------===// +// Instructions +//===----------------------------------------------------------------------===// + +// CMOV instructions +let cx = 0, cw = 0, cw2 = 0 in +defm CMOVL : RRCMOVm<"cmov.l.${cf}", 0x3B, I64, i64, simm7Op64, uimm6Op64>; + +let cx = 0, cw = 1, cw2 = 0 in +defm CMOVW : RRCMOVm<"cmov.w.${cf}", 0x3B, I32, i32, simm7Op64, uimm6Op32>; + +let cx = 0, cw = 0, cw2 = 1 in +defm CMOVD : RRCMOVm<"cmov.d.${cf}", 0x3B, I64, f64, simm7Op64, uimm6Op64>; + +let cx = 0, cw = 1, cw2 = 1 in +defm CMOVS : RRCMOVm<"cmov.s.${cf}", 0x3B, F32, f32, simm7Op64, uimm6Op32>; + +// NOP instruction +let cx = 0, sx = 0, cy = 0, sy = 0, cz = 0, sz = 0, imm32 = 0, hasSideEffects = 0 in +def NOP : RR<0x79, (outs), (ins), "nop", []>; + +// LEA and LEASL instruction (load 32 bit imm to low or high part) +let cx = 0 in +defm LEA : RMm<"lea", 0x06, add, I64, i64, simm7Op64, simm32Op64>; +let cx = 1 in +defm LEASL : RMNDm<"lea.sl", 0x06, add, I64, i64, simm7Op64, simm32Op64>; +let isCodeGenOnly = 1 in { +let cx = 0 in +defm LEA32 : RMm<"lea", 0x06, add, I32, i32, simm7Op32, simm32Op32>; +let cx = 1 in +defm LEASL32 : RMNDm<"lea.sl", 0x06, add, I32, i32, simm7Op32, simm32Op32>; +} + +let cx = 0, cy = 1, cz = 0, sz = 0, hasSideEffects = 0 in { + def LEAasx : RM< + 0x06, (outs I64:$sx), (ins MEMri:$addr), + "lea $sx,$addr", [(set iPTR:$sx, ADDRri:$addr)]>; +} + +// 5.3.2.2. Fixed-Point Arithmetic Operation Instructions + +// ADD instruction +let cx = 0 in +defm ADD : RRNDm<"addu.l", 0x48, add, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm ADDUW : RRNDm<"addu.w", 0x48, add, I32, i32, simm7Op32, uimm6Op32>; + +// ADS instruction +let cx = 0 in +defm ADS : RRm<"adds.w.sx", 0x4A, add, I32, i32, simm7Op32, uimm6Op32>; +let cx = 1 in +defm ADSU : RRNDm<"adds.w.zx", 0x4A, add, I32, i32, simm7Op32, uimm6Op32>; + +// ADX instruction +let cx = 0 in +defm ADX : RRm<"adds.l", 0x59, add, I64, i64, simm7Op64, uimm6Op64>; + +// SUB instruction +let cx = 0 in +defm SUB : RRNDm<"subu.l", 0x58, sub, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm SUBUW : RRNDm<"subu.w", 0x58, sub, I32, i32, simm7Op32, uimm6Op32>; + +// SBS instruction +let cx = 0 in +defm SBS : RRNCm<"subs.w.sx", 0x5A, sub, I32, i32, simm7Op32, uimm6Op32>; +let cx = 1 in +defm SBSU : RRNDm<"subs.w.zx", 0x5A, sub, I32, i32, simm7Op32, uimm6Op32>; + +// SBX instruction +let cx = 0 in +defm SBX : RRNCm<"subs.l", 0x5B, sub, I64, i64, simm7Op64, uimm6Op64>; + +// MPY instruction +let cx = 0 in +defm MPY : RRNDm<"mulu.l", 0x49, mul, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm MPYUW : RRNDm<"mulu.w", 0x49, mul, I32, i32, simm7Op32, uimm6Op32>; + +// MPS instruction +let cx = 0 in +defm MPS : RRm<"muls.w.sx", 0x4B, mul, I32, i32, simm7Op32, uimm6Op32>; +let cx = 1 in +defm MPSU : RRNDm<"muls.w.zx", 0x4B, mul, I32, i32, simm7Op32, uimm6Op32>; + +// MPX instruction +let cx = 0 in +defm MPX : RRm<"muls.l", 0x6E, mul, I64, i64, simm7Op64, uimm6Op64>; + +// DIV instruction +let cx = 0 in +defm DIV : RRNCm<"divu.l", 0x6F, udiv, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm DIVUW : RRNCm<"divu.w", 0x6F, udiv, I32, i32, simm7Op32, uimm6Op32>; + +// DVS instruction +let cx = 0 in +defm DVS : 
RRNCm<"divs.w.sx", 0x7B, sdiv, I32, i32, simm7Op32, uimm6Op32>; +let cx = 1 in +defm DVSU : RRNDm<"divs.w.zx", 0x7B, sdiv, I32, i32, simm7Op32, uimm6Op32>; + +// DVX instruction +let cx = 0 in +defm DVX : RRNCm<"divs.l", 0x7F, sdiv, I64, i64, simm7Op64, uimm6Op64>; + +// CMP instruction +let cx = 0 in +defm CMP : RRNDm<"cmpu.l", 0x55, setcc, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm CMPUW : RRNDm<"cmpu.w", 0x55, setcc, I32, i32, simm7Op32, uimm6Op32>; + +// CPS instruction +let cx = 0 in +defm CPS : RRNDm<"cmps.w.sx", 0x7A, setcc, I32, i32, simm7Op32, uimm6Op32>; +let cx = 1 in +defm CPSU : RRNDm<"cmps.w.zx", 0x7A, setcc, I32, i32, simm7Op32, uimm6Op32>; + +// CPX instruction +let cx = 0 in +defm CPX : RRNDm<"cmps.l", 0x6A, setcc, I64, i64, simm7Op64, uimm6Op64>; + +// cx: sx/zx, cw: max/min + +let cw = 0 in defm CMXa : + RRm<"maxs.l", 0x68, VEmax, I64, i64, simm7Op64, uimm6Op64>; + +let cx = 0, cw = 0 in defm CMSa : + RRm<"maxs.w.zx", 0x78, VEmax, I32, i32, simm7Op32, uimm6Op32>; + +let cw = 1 in defm CMXi : + RRm<"mins.l", 0x68, VEmin, I64, i64, simm7Op64, uimm6Op64>; + +let cx = 1, cw = 0 in defm CMSi : + RRm<"mins.w.zx", 0x78, VEmin, I32, i32, simm7Op32, uimm6Op32>; + +// 5.3.2.3. Logical Arithmetic Operation Instructions + +// AND, OR, XOR, EQV, NND, and MRG instruction +let cx = 0 in { + defm AND : RRm<"and", 0x44, and, I64, i64, simm7Op64, uimm6Op64>; + defm OR : RRm<"or", 0x45, or, I64, i64, simm7Op64, uimm6Op64>; + defm XOR : RRm<"xor", 0x46, xor, I64, i64, simm7Op64, uimm6Op64>; + let isCodeGenOnly = 1 in { + defm AND32 : RRm<"and", 0x44, and, I32, i32, simm7Op32, uimm6Op32>; + defm OR32 : RRm<"or", 0x45, or, I32, i32, simm7Op32, uimm6Op32>; + defm XOR32 : RRm<"xor", 0x46, xor, I32, i32, simm7Op32, uimm6Op32>; + } + /* + defm EQV : RRm<"eqv", 0x47, eqv, I64, i64, simm7Op64, uimm6Op64>; + defm NND : RRm<"nnd", 0x54, nnd, I64, i64, simm7Op64, uimm6Op64>; + defm MRG : RRm<"mrg", 0x56, mrg, I64, i64, simm7Op64, uimm6Op64>; + */ +} + +// Bits operations + +let cx = 0 in { +defm PCNT : RRI2m<"pcnt", 0x38, ctpop, I64, i64, uimm6Op64>; +defm BRV : RRI2m<"brv", 0x39, bitreverse, I64, i64, uimm6Op64>; +defm LDZ : RRI2m<"ldz", 0x67, ctlz, I64, i64, uimm6Op64>; +defm BSWP : RRINDm<"bswp", 0x2B, bswap, I64, i64, simm7Op64, uimm6Op64>; +} + + +// 5.3.2.4 Shift Instructions + +let cx = 0 in +defm SRAX : RRIm<"sra.l", 0x77, sra, I64, i64, simm7Op32, uimm6Op64>; +let cx = 0 in +defm SRA : RRIm<"sra.w.sx", 0x76, sra, I32, i32, simm7Op32, uimm6Op32>; +let cx = 1 in +defm SRAU : RRINDm<"sra.w.zx", 0x76, sra, I32, i32, simm7Op32, uimm6Op32>; + +let cx = 0 in +defm SLL : RRIm<"sll", 0x65, shl, I64, i64, simm7Op32, uimm6Op64>; +let cx = 0 in +defm SLA : RRIm<"sla.w.sx", 0x66, shl, I32, i32, simm7Op32, uimm6Op32>; +let cx = 1 in +defm SLAU : RRINDm<"sla.w.zx", 0x66, shl, I32, i32, simm7Op32, uimm6Op32>; + +let cx = 0 in +defm SRL : RRIm<"srl", 0x75, srl, I64, i64, simm7Op32, uimm6Op64>; + +def : Pat<(i32 (srl i32:$src, (i32 simm7:$val))), + (EXTRACT_SUBREG (SRLri (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + $src, sub_i32), 32), imm:$val), sub_i32)>; +def : Pat<(i32 (srl i32:$src, i32:$val)), + (EXTRACT_SUBREG (SRLrr (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + $src, sub_i32), 32), $val), sub_i32)>; + +// 5.3.2.5. 
Floating-point Arithmetic Operation Instructions +let cx = 0 in +defm FAD : RRFm<"fadd.d", 0x4C, fadd, I64, f64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm FADS : RRFm<"fadd.s", 0x4C, fadd, F32, f32, simm7Op32, uimm6Op32>; +let cx = 0 in +defm FAQ : RRFm<"fadd.q", 0x6C, fadd, F128, f128, simm7Op128, uimm6Op128>; + +let cx = 0 in +defm FSB : RRFm<"fsub.d", 0x5C, fsub, I64, f64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm FSBS : RRFm<"fsub.s", 0x5C, fsub, F32, f32, simm7Op32, uimm6Op32>; +let cx = 0 in +defm FSQ : RRFm<"fsub.q", 0x7C, fsub, F128, f128, simm7Op128, uimm6Op128>; + +let cx = 0 in +defm FMP : RRFm<"fmul.d", 0x4D, fmul, I64, f64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm FMPS : RRFm<"fmul.s", 0x4D, fmul, F32, f32, simm7Op32, uimm6Op32>; +let cx = 0 in +defm FMQ : RRFm<"fmul.q", 0x6D, fmul, F128, f128, simm7Op128, uimm6Op128>; + +let cx = 0 in +defm FDV : RRFm<"fdiv.d", 0x5D, fdiv, I64, f64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm FDVS : RRFm<"fdiv.s", 0x5D, fdiv, F32, f32, simm7Op32, uimm6Op32>; + +// FCP instruction +let cx = 0 in +defm FCP : RRNDm<"fcmp.d", 0x7E, setcc, I64, f64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm FCPS : RRNDm<"fcmp.s", 0x7E, setcc, F32, f32, simm7Op32, uimm6Op32>; +let cx = 0 in +defm FCQ : RRFCQm<"fcmp.q", 0x7D, setcc, F128, f128, simm7Op128, uimm6Op128>; + +// FCM +let cw = 0 in { + let cx = 0 in + defm FCMA : RRNDm<"fmax.d", 0x3E, VEfmax, I64, f64, simm7Op64, uimm6Op64>; + let cx = 1 in + defm FCMAS : RRNDm<"fmax.s", 0x3E, VEfmax, F32, f32, simm7Op32, uimm6Op32>; +} +let cw = 1 in { + let cx = 0 in + defm FCMI : RRNDm<"fmin.d", 0x3E, VEfmin, I64, f64, simm7Op64, uimm6Op64>; + let cx = 1 in + defm FCMIS : RRNDm<"fmin.s", 0x3E, VEfmin, F32, f32, simm7Op32, uimm6Op32>; +} + +let cx = 0, cw = 0 /* sign extend */, cz = 1, sz = 0 /* round toward zero */ in +defm FIX : CVTm<"cvt.w.d.sx.rz", 0x4E, fp_to_sint, I32, i32, I64, f64, simm7Op32>; +let cx = 1, cw = 0 /* sign extend */, cz = 1, sz = 0 /* round toward zero */ in +defm FIXS : CVTm<"cvt.w.s.sx.rz", 0x4E, fp_to_sint, I32, i32, F32, f32, simm7Op32>; +let cx = 0, cz = 1, sz = 0 /* round toward zero */ in +defm FIXX : CVTm<"cvt.l.d.rz", 0x4F, fp_to_sint, I64, i64, I64, f64, simm7Op64>; +let cz = 0, sz = 0 in { + let cx = 0 in + defm FLT : CVTm<"cvt.d.w", 0x5E, sint_to_fp, I64, f64, I32, i32, simm7Op32>; + let cx = 1 in + defm FLTS : CVTm<"cvt.s.w", 0x5E, sint_to_fp, F32, f32, I32, i32, simm7Op32>; + let cx = 0 in + defm FLTX : CVTm<"cvt.d.l", 0x5F, sint_to_fp, I64, f64, I64, i64, simm7Op64>; + let cx = 0 in + defm CVS : CVTm<"cvt.s.d", 0x1F, fpround, F32, f32, I64, f64, simm7Op64>; + let cx = 1 in + defm CVSQ : CVTm<"cvt.s.q", 0x1F, fpround, F32, f32, F128, f128, simm7Op128>; + let cx = 0 in + defm CVD : CVTm<"cvt.d.s", 0x0F, fpextend, I64, f64, F32, f32, simm7Op32>; + let cx = 1 in + defm CVDQ : CVTm<"cvt.d.q", 0x0F, fpround, I64, f64, F128, f128, simm7Op128>; + let cx = 0 in + defm CVQ : CVTm<"cvt.q.d", 0x2D, fpextend, F128, f128, I64, f64, simm7Op64>; + let cx = 1 in + defm CVQS : CVTm<"cvt.q.s", 0x2D, fpextend, F128, f128, F32, f32, simm7Op32>; +} + +// Load and Store instructions +// As 1st step, only uses sz and imm32 to represent $addr +let mayLoad = 1, hasSideEffects = 0 in { +let cy = 0, sy = 0, cz = 1 in { +let cx = 0 in +def LDSri : RM< + 0x01, (outs I64:$sx), (ins MEMri:$addr), + "ld $sx, $addr", + [(set i64:$sx, (load ADDRri:$addr))]>; +let cx = 0 in +def LDUri : RM< + 0x02, (outs F32:$sx), (ins MEMri:$addr), + "ldu $sx, $addr", + [(set f32:$sx, (load ADDRri:$addr))]>; +let cx = 0 in 
+def LDLri : RM< + 0x03, (outs I32:$sx), (ins MEMri:$addr), + "ldl.sx $sx, $addr", + [(set i32:$sx, (load ADDRri:$addr))]>; +let cx = 1 in +def LDLUri : RM< + 0x03, (outs I32:$sx), (ins MEMri:$addr), + "ldl.zx $sx, $addr", + [(set i32:$sx, (load ADDRri:$addr))]>; +let cx = 0 in +def LD2Bri : RM< + 0x04, (outs I32:$sx), (ins MEMri:$addr), + "ld2b.sx $sx, $addr", + [(set i32:$sx, (sextloadi16 ADDRri:$addr))]>; +let cx = 1 in +def LD2BUri : RM< + 0x04, (outs I32:$sx), (ins MEMri:$addr), + "ld2b.zx $sx, $addr", + [(set i32:$sx, (zextloadi16 ADDRri:$addr))]>; +let cx = 0 in +def LD1Bri : RM< + 0x05, (outs I32:$sx), (ins MEMri:$addr), + "ld1b.sx $sx, $addr", + [(set i32:$sx, (sextloadi8 ADDRri:$addr))]>; +let cx = 1 in +def LD1BUri : RM< + 0x05, (outs I32:$sx), (ins MEMri:$addr), + "ld1b.zx $sx, $addr", + [(set i32:$sx, (zextloadi8 ADDRri:$addr))]>; +} +def LDQri : Pseudo< + (outs F128:$sx), (ins MEMri:$addr), + "# pseudo ldq $sx, $addr", + [(set f128:$sx, (load ADDRri:$addr))]>; +} + +let mayStore = 1, hasSideEffects = 0 in { +let cx = 0, cy = 0, sy = 0, cz = 1 in { +def STSri : RM< + 0x11, (outs), (ins MEMri:$addr, I64:$sx), + "st $sx, $addr", + [(store i64:$sx, ADDRri:$addr)]>; +def STUri : RM< + 0x12, (outs), (ins MEMri:$addr, F32:$sx), + "stu $sx, $addr", + [(store f32:$sx, ADDRri:$addr)]>; +def STLri : RM< + 0x13, (outs), (ins MEMri:$addr, I32:$sx), + "stl $sx, $addr", + [(store i32:$sx, ADDRri:$addr)]>; +def ST2Bri : RM< + 0x14, (outs), (ins MEMri:$addr, I32:$sx), + "st2b $sx, $addr", + [(truncstorei16 i32:$sx, ADDRri:$addr)]>; +def ST1Bri : RM< + 0x15, (outs), (ins MEMri:$addr, I32:$sx), + "st1b $sx, $addr", + [(truncstorei8 i32:$sx, ADDRri:$addr)]>; +} +def STQri : Pseudo< + (outs), (ins MEMri:$addr, F128:$sx), + "# pseudo stq $sx, $addr", + [(store f128:$sx, ADDRri:$addr)]>; +} + +def : Pat<(f64 (load ADDRri:$addr)), (LDSri ADDRri:$addr)>; +def : Pat<(store f64:$sx, ADDRri:$addr), (STSri ADDRri:$addr, $sx)>; + +// Patterns for unaligned load + +def unaligned4load : PatFrag<(ops node:$ptr), (load node:$ptr), [{ + return cast(N)->getAlignment() < 4; +}]>; +def unaligned8load : PatFrag<(ops node:$ptr), (load node:$ptr), [{ + return cast(N)->getAlignment() < 8; +}]>; +def : Pat<(i32 (unaligned4load ADDRri:$addr)), + (LDLri MEMri:$addr)>; +def : Pat<(f32 (unaligned4load ADDRri:$addr)), + (LDUri MEMri:$addr)>; +def : Pat<(i64 (unaligned8load ADDRri:$addr)), + (LDSri ADDRri:$addr)>; +def : Pat<(f64 (unaligned8load ADDRri:$addr)), + (LDSri ADDRri:$addr)>; + +// Patterns for unaligned store + +def unaligned4store : PatFrag<(ops node:$val, node:$ptr), + (store node:$val, node:$ptr), [{ + return cast(N)->getAlignment() < 4; +}]>; +def unaligned8store : PatFrag<(ops node:$val, node:$ptr), + (store node:$val, node:$ptr), [{ + return cast(N)->getAlignment() < 8; +}]>; +def : Pat<(unaligned4store i32:$sx, ADDRri:$addr), + (STLri ADDRri:$addr, $sx)>; +def : Pat<(unaligned4store f32:$sx, ADDRri:$addr), + (STUri ADDRri:$addr, $sx)>; +def : Pat<(unaligned8store i64:$sx, ADDRri:$addr), + (STSri ADDRri:$addr, $sx)>; +def : Pat<(unaligned8store f64:$sx, ADDRri:$addr), + (STSri ADDRri:$addr, $sx)>; + +// Patterns for unaligned sextload/zextload/extload + +def unaligned2extloadi16 : PatFrag<(ops node:$ptr), (extloadi16 node:$ptr), [{ + return cast(N)->getAlignment() < 2; +}]>; +def unaligned2sextloadi16 : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{ + return cast(N)->getAlignment() < 2; +}]>; +def unaligned2zextloadi16 : PatFrag<(ops node:$ptr), (zextloadi16 node:$ptr), [{ + return 
cast(N)->getAlignment() < 2; +}]>; +def unaligned4extloadi32 : PatFrag<(ops node:$ptr), (extloadi32 node:$ptr), [{ + return cast(N)->getAlignment() < 4; +}]>; +def unaligned4sextloadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{ + return cast(N)->getAlignment() < 4; +}]>; +def unaligned4zextloadi32 : PatFrag<(ops node:$ptr), (zextloadi32 node:$ptr), [{ + return cast(N)->getAlignment() < 4; +}]>; +def : Pat<(i64 (unaligned2sextloadi16 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2Bri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (unaligned2zextloadi16 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (unaligned2extloadi16 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (unaligned4sextloadi32 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (unaligned4zextloadi32 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (unaligned4extloadi32 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>; + +// Patterns for unaligned truncstore + +def unaligned4truncstorei16 : PatFrag<(ops node:$val, node:$ptr), + (truncstorei16 node:$val, node:$ptr), [{ + return cast(N)->getAlignment() < 4; +}]>; +def unaligned8truncstorei32 : PatFrag<(ops node:$val, node:$ptr), + (truncstorei32 node:$val, node:$ptr), [{ + return cast(N)->getAlignment() < 8; +}]>; +def : Pat<(unaligned4truncstorei16 i64:$sx, ADDRri:$addr), + (ST2Bri ADDRri:$addr, (EXTRACT_SUBREG $sx, sub_i32))>; +def : Pat<(unaligned8truncstorei32 i64:$sx, ADDRri:$addr), + (STLri ADDRri:$addr, (EXTRACT_SUBREG $sx, sub_i32))>; + +// Jump instruction +let cx = 0, cx2 = 0, bpf = 0 /* NONE */, cy = 1, cz = 1, + isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasSideEffects = 0 in +def BC : CF< + 0x19, (outs), (ins CCOp:$cf, I64:$sy, brtarget32:$imm32), + "b.${cf}.l $sy, $imm32", + []>; + +// Jump always instruction is treated as a special case of jump in order +// to make finding unconditional jump easy. +let cx = 0, cx2 = 0, bpf = 0 /* NONE */, cf = 15 /* AT */, cy = 0, sy = 0, + cz = 1, + isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1, + hasDelaySlot = 1, isCodeGenOnly = 1, hasSideEffects = 0 in { +def BArr : CF< + 0x19, (outs), (ins MEMrr:$addr), + "b.l $addr", + [(brind ADDRrr:$addr)]>; +def BAri : CF< + 0x19, (outs), (ins MEMri:$addr), + "b.l $addr", + [(brind ADDRri:$addr)]>; +} + +// Jump never instruction is also a special case of jump. +let cx = 0, cx2 = 0, bpf = 0 /* NONE */, cf = 0 /* AF */, cy = 1, sy = 0, + cz = 1, + isBranch = 1, isTerminator = 1, hasDelaySlot = 1, hasSideEffects = 0 in +def BN : CF< + 0x19, (outs), (ins brtarget32:$imm32), + "b.af.l $imm32", + []>; + +// Return instruction is also a special case of jump. 
+let cx = 0, cx2 = 0, bpf = 0 /* NONE */, cf = 15 /* AT */, cy = 0, sy = 0, + cz = 1, sz = 0x10 /* SX10 */, imm32 = 0, Uses = [SX10], + isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1, + isCodeGenOnly = 1, hasSideEffects = 0 in +def RET : CF< + 0x19, (outs), (ins), + "b.l (,%lr)", + [(retflag)]>; + +// Branch and Save IC + +let cx = 0, cy = 0, cy = 0, cz = 1, hasSideEffects = 0 /* , Uses = [IC] */ in +def BSIC : RM<0x08, (outs), (ins I64:$sx, I64:$sz), "bsic $sx, (, ${sz})", []>; + +// Branch instruction +let cx = 0, cx2 = 0, bpf = 0 /* NONE */ in +defm BCRL : BCRm<"br${cf}.l", "br.l", 0x18, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1, cx2 = 0, bpf = 0 /* NONE */ in +defm BCRW : BCRm<"br${cf}.w", "br.w", 0x18, I32, i32, simm7Op32, uimm6Op32>; +let cx = 0, cx2 = 1, bpf = 0 /* NONE */ in +defm BCRD : BCRm<"br${cf}.d", "br.d", 0x18, I64, f64, simm7Op64, uimm6Op64>; +let cx = 1, cx2 = 1, bpf = 0 /* NONE */ in +defm BCRS : BCRm<"br${cf}.s", "br.s", 0x18, F32, f32, simm7Op32, uimm6Op32>; + +// Load and Store host memory instructions +let cx = 0, cy = 0, cz = 1, hasSideEffects = 0 in { +let sy = 3 in +def LHMri : RM< + 0x21, (outs I64:$sx), (ins MEMASri:$addr), + "lhm.l $sx, $addr", + []>; +let sy = 2 in +def LHMLri : RM< + 0x21, (outs I32:$sx), (ins MEMASri:$addr), + "lhm.w $sx, $addr", + []>; +let sy = 1 in +def LHM2Bri : RM< + 0x21, (outs I16:$sx), (ins MEMASri:$addr), + "lhm.h $sx, $addr", + []>; +let sy = 0 in +def LHM1Bri : RM< + 0x21, (outs I8:$sx), (ins MEMASri:$addr), + "lhm.b $sx, $addr", + []>; +} + +let cx = 0, cy = 0, cz = 1, hasSideEffects = 0 in { +let sy = 3 in +def SHMri : RM< + 0x31, (outs), (ins MEMASri:$addr, I64:$sx), + "shm.l $sx, $addr", + []>; +let sy = 2 in +def SHMLri : RM< + 0x31, (outs), (ins MEMASri:$addr, I32:$sx), + "shm.l $sx, $addr", + []>; +let sy = 1 in +def SHM2Bri : RM< + 0x31, (outs), (ins MEMASri:$addr, I16:$sx), + "shm.l $sx, $addr", + []>; +let sy = 0 in +def SHM1Bri : RM< + 0x31, (outs), (ins MEMASri:$addr, I8:$sx), + "shm.l $sx, $addr", + []>; +} + +let cx = 0, sx = 0, cy = 0, sy = 0, cz = 0, sz = 0, hasSideEffects = 0 in +def MONC : RR< + 0x3F, (outs), (ins), + "monc", + []>; + +let cx = 1, sx = 0, cy = 0, sy = 0, cz = 0, sz = 0, hasSideEffects = 0 in +def MONCT : RR< + 0x3F, (outs), (ins), + "monc.hdb", + []>; + +// Save Instruction Counter + +let cx = 0, cy = 0, sy = 0, cz = 0, sz = 0, hasSideEffects = 0 /* , Uses = [IC] */ in +def SIC : RR<0x28, (outs I32:$sx), (ins), "sic $sx", []>; + +// Test and Set 1 AM (multiple length swap) + +let cx = 0 in +defm TS1AML : RRCASm<"ts1am.l", 0x42, add, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm TS1AMW : RRCASm<"ts1am.w", 0x42, add, I32, i32, simm7Op32, uimm6Op32>; + +// Atomic swap +// FIXME: Assign 4 byte align address to $src +// def : Pat<(i32 (atomic_swap_8 ADDRri:$src, i32:$new)), +// (TS1AMWasi MEMASri:$src, 1, $new)>; +// def : Pat<(i32 (atomic_swap_16 ADDRri:$src, i32:$new)), +// (TS1AMWasi MEMASri:$src, 3, $new)>; +def : Pat<(i32 (atomic_swap_32 ADDRri:$src, i32:$new)), + (TS1AMWasi MEMASri:$src, 15, $new)>; +def : Pat<(i64 (atomic_swap_64 ADDRri:$src, i64:$new)), + (TS1AMLasi MEMASri:$src, 127, $new)>; + +// Compare and Swap + +let cx = 0 in +defm CASL : RRCASm<"cas.l", 0x62, add, I64, i64, simm7Op64, uimm6Op64>; +let cx = 1 in +defm CASW : RRCASm<"cas.w", 0x62, add, I32, i32, simm7Op32, uimm6Op32>; + +// Atomic cmp and swap +def : Pat<(i32 (atomic_cmp_swap_32 ADDRri:$src, i32:$cmp, i32:$new)), + (CASWasr MEMASri:$src, $cmp, $new)>; +def : Pat<(i64 (atomic_cmp_swap_64 
ADDRri:$src, i64:$cmp, i64:$new)), + (CASLasr MEMASri:$src, $cmp, $new)>; + +// Transfer Control Instruction + +let avo = 0, c2 = 0, c1 = 0, c0 = 0, hasSideEffects = 1 in { + let lf = 0, sf = 1 in + def FENCEstore : RRFENCE<0x20, (outs), (ins), "fencem 1", []>; + let lf = 1, sf = 0 in + def FENCEload : RRFENCE<0x20, (outs), (ins), "fencem 2", []>; + let lf = 1, sf = 1 in + def FENCEloadstore : RRFENCE<0x20, (outs), (ins), "fencem 3", []>; +} + +def : Pat<(int_ve_fencem1), (FENCEstore)>; +def : Pat<(int_ve_fencem2), (FENCEload)>; +def : Pat<(int_ve_fencem3), (FENCEloadstore)>; + +// Set Vector Out-of-order memory access Boundary + +let sx = 0, sy = 0, sz = 0, hasSideEffects = 1 in +def SVOB : RR<0x30, (outs), (ins), "svob", []>; + +// MEMBARRIER +let hasSideEffects = 1 in +def MEMBARRIER : Pseudo<(outs), (ins), "# MEMBARRIER", + [(MemBarrier)] >; + +//===----------------------------------------------------------------------===// +// SJLJ Exception handling intrinsics +//===----------------------------------------------------------------------===// + +let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in { + def EH_SjLj_SetJmp : Pseudo<(outs I32:$dst), (ins I64:$buf), + "# EH_SJLJ_SETJMP", + [(set I32:$dst, (VEeh_sjlj_setjmp I64:$buf))]>; + let isTerminator = 1 in { + def EH_SjLj_LongJmp : Pseudo<(outs), (ins I64:$buf), + "# EH_SJLJ_LONGJMP", + [(VEeh_sjlj_longjmp I64:$buf)]>; + } +} + +let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1 in + def EH_SjLj_Setup_Dispatch : Pseudo<(outs), (ins), "# EH_SJLJ_SETUP_DISPATCH", + [(VEeh_sjlj_setup_dispatch)]>; + +//===----------------------------------------------------------------------===// +// Dummy instruction for CPU flow control +//===----------------------------------------------------------------------===// + +let mayLoad = 1, mayStore = 0, hasSideEffects = 1, isTrap = 1 in { + def TRAP : Pseudo<(outs), (ins), "# TRAP", [(trap)]>; +} + +//===----------------------------------------------------------------------===// +// Instructions for CodeGenOnly +//===----------------------------------------------------------------------===// + +let isCodeGenOnly = 1 in { + +// Call instruction +let Defs = [SX10], Uses = [SX11], hasDelaySlot = 1, isCall = 1, hasSideEffects = 0 in { +let cx = 0, sx = 10, cy = 0, sy = 0, cz = 0, sz = 0 in +def CALL : RM< + 0x08, (outs), (ins calltarget:$imm32, variable_ops), + "bsic %lr, $imm32", + []>; +/* +// use sy and sz to represent 2 registers +let cx = 0, sx = 10, cy = 1, cz = 1, imm32 = 0 in +def CALLrr : RM< + 0x08, (outs), (ins MEMrr:$ptr, variable_ops), + "bsic %lr, $ptr", + [(call ADDRrr:$ptr)]>; +// use sz to represent a register, and use imm32 to represent immediate value +let cx = 0, sx = 10, cy = 0, sy = 0, cz = 1 in +def CALLri : RM< + 0x08, (outs), (ins MEMri:$ptr, variable_ops), + "bsic %lr, $ptr", + [(call ADDRri:$ptr)]>; +*/ +// use sz to represent a register +let cx = 0, sx = 10, cy = 0, sy = 0, cz = 1, imm32 = 0 in +def CALLr : RM< + 0x08, (outs), (ins I64:$sz, variable_ops), + "bsic %lr, (,$sz)", + []>; +} + +} + +//===----------------------------------------------------------------------===// +// Pattern Matchings +//===----------------------------------------------------------------------===// + +// Small immediates. +def : Pat<(i32 simm7:$val), (OR32im1 imm:$val, 0)>; +def : Pat<(i64 simm7:$val), (ORim1 imm:$val, 0)>; +// Medium immediates. 
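The constant-materialization patterns in this block (small, medium, and arbitrary immediates) pick progressively longer sequences by immediate class: a 7-bit value fits in a single OR, a 32-bit value in one LEA, a value with only the low 32 bits populated needs an extra AND mask, and everything else also needs a LEASL for the upper half. Below is a stand-alone sketch of that classification, mirroring the simm7/simm32/uimm32/lozero PatLeaf predicates defined earlier in this file; the helper and enum names are illustrative only, not part of the patch.

#include <cstdint>

// Which materialization tier a 64-bit constant falls into (illustrative).
enum class ImmTier { Simm7, Simm32, Uimm32, LoZero, General };

ImmTier classifyImm64(int64_t V) {
  uint64_t U = static_cast<uint64_t>(V);
  if (V >= -64 && V <= 63)
    return ImmTier::Simm7;     // single OR with a 7-bit literal
  if (V >= INT32_MIN && V <= INT32_MAX)
    return ImmTier::Simm32;    // single sign-extending LEA
  if ((U >> 32) == 0)
    return ImmTier::Uimm32;    // LEA, then AND to clear bits 63..32
  if ((U & 0xffffffff) == 0)
    return ImmTier::LoZero;    // single LEASL of the high half
  return ImmTier::General;     // LEA (+ AND) for the low half, then LEASL
}

The separate lomsbzero pattern above exists because, when bit 31 of the constant is clear, the 32-bit LEA result carries no sign bits into the upper half, so the AND mask can be skipped before the LEASL.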
+def : Pat<(i32 simm32:$val), (LEA32zzi imm:$val)>; +def : Pat<(i64 simm32:$val), (LEAzzi imm:$val)>; +def : Pat<(i64 uimm32:$val), (ANDrm0 (LEAzzi imm:$val), 32)>; +// Arbitrary immediates. +def : Pat<(i64 lozero:$val), + (LEASLzzi (HI32 imm:$val))>; +def : Pat<(i64 lomsbzero:$val), + (LEASLrzi (LEAzzi (LO32 imm:$val)), (HI32 imm:$val))>; +def : Pat<(i64 imm:$val), + (LEASLrzi (ANDrm0 (LEAzzi (LO32 imm:$val)), 32), + (HI32 imm:$val))>; + +// floating point +def : Pat<(f32 fpimm:$val), + (COPY_TO_REGCLASS (LEASLzzi (LOFP32 $val)), F32)>; +def : Pat<(f64 fplozero:$val), + (LEASLzzi (HIFP32 $val))>; +def : Pat<(f64 fplomsbzero:$val), + (LEASLrzi (LEAzzi (LOFP32 $val)), (HIFP32 $val))>; +def : Pat<(f64 fpimm:$val), + (LEASLrzi (ANDrm0 (LEAzzi (LOFP32 $val)), 32), + (HIFP32 $val))>; + +// The same integer registers are used for i32 and i64 values. +// When registers hold i32 values, the high bits are don't care. + +// Cast to i1 +def : Pat<(sext_inreg I32:$src, i1), + (SRAri (SLAri $src, 31), 31)>; +def : Pat<(sext_inreg I64:$src, i1), + (SRAXri (SLLri $src, 63), 63)>; + +// Cast to i8 +def : Pat<(sext_inreg I32:$src, i8), + (SRAri (SLAri $src, 24), 24)>; +def : Pat<(sext_inreg I64:$src, i8), + (SRAXri (SLLri $src, 56), 56)>; +def : Pat<(sext_inreg (i32 (trunc i64:$src)), i8), + (EXTRACT_SUBREG (SRAXri (SLLri $src, 56), 56), sub_i32)>; +def : Pat<(and (trunc i64:$src), 0xff), + (AND32rm0 (EXTRACT_SUBREG $src, sub_i32), 56)>; + +// Cast to i16 +def : Pat<(sext_inreg I32:$src, i16), + (SRAri (SLAri $src, 16), 16)>; +def : Pat<(sext_inreg I64:$src, i16), + (SRAXri (SLLri $src, 48), 48)>; +def : Pat<(sext_inreg (i32 (trunc i64:$src)), i16), + (EXTRACT_SUBREG (SRAXri (SLLri $src, 48), 48), sub_i32)>; +def : Pat<(and (trunc i64:$src), 0xffff), + (AND32rm0 (EXTRACT_SUBREG $src, sub_i32), 48)>; + +// Cast to i32 +def : Pat<(i32 (trunc i64:$src)), + (ADSrm1 (EXTRACT_SUBREG $src, sub_i32), 0)>; +def : Pat<(i32 (fp_to_sint f128:$sy)), (FIXr (CVDQr $sy))>; + +// Cast to i64 +def : Pat<(sext_inreg I64:$src, i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + (ADSrm1 (EXTRACT_SUBREG $src, sub_i32), 0), sub_i32)>; +def : Pat<(i64 (sext i32:$sy)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (ADSrm1 $sy, 0), sub_i32)>; +def : Pat<(i64 (zext i32:$sy)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (ADSUrm1 $sy, 0), sub_i32)>; +def : Pat<(i64 (fp_to_sint f32:$sy)), (FIXXr (CVDr $sy))>; +def : Pat<(i64 (fp_to_sint f128:$sy)), (FIXXr (CVDQr $sy))>; + +// Cast to f32 +def : Pat<(f32 (sint_to_fp i64:$sy)), (CVSr (FLTXr i64:$sy))>; + +// Cast to f64 + +// Cast to f128 +def : Pat<(f128 (sint_to_fp i32:$sy)), (CVQr (FLTr $sy))>; +def : Pat<(f128 (sint_to_fp i64:$sy)), (CVQr (FLTXr $sy))>; + +def : Pat<(i64 (anyext i32:$sy)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32)>; + +// extload, sextload and zextload stuff +def : Pat<(i64 (sextloadi8 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1Bri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (zextloadi8 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (sextloadi16 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2Bri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (zextloadi16 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (sextloadi32 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (zextloadi32 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (extloadi8 
ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (extloadi16 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>; +def : Pat<(i64 (extloadi32 ADDRri:$addr)), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>; + +// anyextload +def : Pat<(extloadi8 ADDRri:$addr), (LD1BUri MEMri:$addr)>; +def : Pat<(extloadi16 ADDRri:$addr), (LD2BUri MEMri:$addr)>; + +// truncstore +def : Pat<(truncstorei8 i64:$src, ADDRri:$addr), + (ST1Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>; +def : Pat<(truncstorei16 i64:$src, ADDRri:$addr), + (ST2Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>; +def : Pat<(truncstorei32 i64:$src, ADDRri:$addr), + (STLri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>; + +// Atomic loads +def : Pat<(atomic_load_8 ADDRri:$src), (LD1BUri MEMri:$src)>; +def : Pat<(atomic_load_16 ADDRri:$src), (LD2BUri MEMri:$src)>; +def : Pat<(atomic_load_32 ADDRri:$src), (LDLUri MEMri:$src)>; +def : Pat<(atomic_load_64 ADDRri:$src), (LDSri MEMri:$src)>; + +// Atomic stores +def : Pat<(atomic_store_8 ADDRri:$ptr, i32:$val), + (ST1Bri MEMri:$ptr, $val)>; +def : Pat<(atomic_store_16 ADDRri:$ptr, i32:$val), + (ST2Bri MEMri:$ptr, $val)>; +def : Pat<(atomic_store_32 ADDRri:$ptr, i32:$val), + (STLri MEMri:$ptr, $val)>; +def : Pat<(atomic_store_64 ADDRri:$ptr, i64:$val), + (STSri MEMri:$ptr, $val)>; + +// Address calculation and its optimization +def : Pat<(VEhi tglobaladdr:$in), (LEASLzzi tglobaladdr:$in)>; +def : Pat<(VElo tglobaladdr:$in), (ANDrm0 (LEAzzi tglobaladdr:$in), 32)>; +def : Pat<(add (VEhi tglobaladdr:$in1), (VElo tglobaladdr:$in2)), + (LEASLrzi (ANDrm0 (LEAzzi tglobaladdr:$in2), 32), + (tglobaladdr:$in1))>; + +// GlobalTLS address calculation and its optimization +def : Pat<(VEhi tglobaltlsaddr:$in), (LEASLzzi tglobaltlsaddr:$in)>; +def : Pat<(VElo tglobaltlsaddr:$in), (ANDrm0 (LEAzzi tglobaltlsaddr:$in), 32)>; +def : Pat<(add (VEhi tglobaltlsaddr:$in1), (VElo tglobaltlsaddr:$in2)), + (LEASLrzi (ANDrm0 (LEAzzi tglobaltlsaddr:$in2), 32), + (tglobaltlsaddr:$in1))>; + +// Address calculation and its optimization +def : Pat<(VEhi tconstpool:$in), (LEASLzzi tconstpool:$in)>; +def : Pat<(VElo tconstpool:$in), (ANDrm0 (LEAzzi tconstpool:$in), 32)>; +def : Pat<(add (VEhi tconstpool:$in1), (VElo tconstpool:$in2)), + (LEASLrzi (ANDrm0 (LEAzzi tconstpool:$in2), 32), + (tconstpool:$in1))>; + +// Address calculation and its optimization +def : Pat<(VEhi texternalsym:$in), (LEASLzzi texternalsym:$in)>; +def : Pat<(VElo texternalsym:$in), (ANDrm0 (LEAzzi texternalsym:$in), 32)>; +def : Pat<(add (VEhi texternalsym:$in1), (VElo texternalsym:$in2)), + (LEASLrzi (ANDrm0 (LEAzzi texternalsym:$in2), 32), + (texternalsym:$in1))>; + +// Address store of mcsym +def : Pat<(store (i64 mcsym:$src), ADDRri:$dst), + (STSri ADDRri:$dst, (LEASLrzi (ANDrm0 (LEAzzi mcsym:$src), 32), + (mcsym:$src)))>; + +// Calls +def : Pat<(call tglobaladdr:$dst), + (CALL tglobaladdr:$dst)>; +def : Pat<(call texternalsym:$dst), + (CALL texternalsym:$dst)>; +def : Pat<(call i64:$dst), + (CALLr i64:$dst)>; + +// Branches +def : Pat<(br bb:$addr), (BCRLa bb:$addr)>; + +// brcc +def : Pat<(brcc CCSIOp:$cond, i32:$l, i32:$r, bb:$addr), + (BCRWrr (icond2cc $cond), $l, $r, bb:$addr)>; +def : Pat<(brcc CCUIOp:$cond, i32:$l, i32:$r, bb:$addr), + (BCRWir (icond2cc $cond), 0, (CMPUWrr $r, $l), bb:$addr)>; +def : Pat<(brcc CCSIOp:$cond, i64:$l, i64:$r, bb:$addr), + (BCRLrr (icond2cc $cond), $l, $r, bb:$addr)>; +def : Pat<(brcc CCUIOp:$cond, 
i64:$l, i64:$r, bb:$addr), + (BCRLir (icond2cc $cond), 0, (CMPrr $r, $l), bb:$addr)>; +def : Pat<(brcc cond:$cond, f32:$l, f32:$r, bb:$addr), + (BCRSrr (fcond2cc $cond), $l, $r, bb:$addr)>; +def : Pat<(brcc cond:$cond, f64:$l, f64:$r, bb:$addr), + (BCRDrr (fcond2cc $cond), $l, $r, bb:$addr)>; +def : Pat<(brcc cond:$cond, f128:$l, f128:$r, bb:$addr), + (BCRDrr (fcond2cc $cond), 0, (FCQrr $r, $l), bb:$addr)>; + +//===----------------------------------------------------------------------===// +// Pseudo Instructions +//===----------------------------------------------------------------------===// + +// GETGOT for PIC +let Defs = [SX15 /* %got */, SX16 /* %plt */], hasSideEffects = 0 in { + def GETGOT : Pseudo<(outs getGOT:$getpcseq), (ins), "$getpcseq", [] >; +} + +// GETFUNPLT for PIC +let hasSideEffects = 0 in +def GETFUNPLT : Pseudo<(outs I64:$dst), (ins i64imm:$addr), + "$dst, $addr", + [(set iPTR:$dst, (GetFunPLT tglobaladdr:$addr))] >; + +def : Pat<(GetFunPLT tglobaladdr:$dst), + (GETFUNPLT tglobaladdr:$dst)>; +def : Pat<(GetFunPLT texternalsym:$dst), + (GETFUNPLT texternalsym:$dst)>; + +// GETTLSADDR for TLS +let Defs = [SX0, SX10, SX12], hasSideEffects = 0 in +def GETTLSADDR : Pseudo<(outs), (ins i64imm:$addr), + "# GETTLSADDR $addr", + [(GetTLSAddr tglobaltlsaddr:$addr)] >; + +def : Pat<(GetTLSAddr tglobaltlsaddr:$dst), + (GETTLSADDR tglobaltlsaddr:$dst)>; + +let Defs = [SX11], Uses = [SX11], hasSideEffects = 0 in { +def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt, i64imm:$amt2), + "# ADJCALLSTACKDOWN $amt, $amt2", + [(callseq_start timm:$amt, timm:$amt2)]>; +def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2), + "# ADJCALLSTACKUP $amt1", + [(callseq_end timm:$amt1, timm:$amt2)]>; +} + +let Defs = [SX8], Uses = [SX8, SX11], hasSideEffects = 0 in +def EXTEND_STACK : Pseudo<(outs), (ins), + "# EXTEND STACK", + []>; +let hasSideEffects = 0 in +def EXTEND_STACK_GUARD : Pseudo<(outs), (ins), + "# EXTEND STACK GUARD", + []>; + +// Dynamic stack allocation yields a __llvm_grow_stack for VE targets. +// These calls are needed to probe the stack when allocating more over +// %s8 (%sl - stack limit). 
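Note on the stack-probing comment above: it states that dynamic stack allocation on VE is lowered through a __llvm_grow_stack probe whenever the frame grows past the limit held in %s8 (%sl), with EXTEND_STACK/EXTEND_STACK_GUARD marking the sequence. The C sketch below only illustrates source code that would take this path; the function name sum_dynamic and the VLA size are illustrative and not part of the patch.

  /* A frame whose size is not a compile-time constant (VLA/alloca)
   * forces dynamic stack allocation, which per the comment above is
   * expected to go through the VE stack-probing sequence. */
  #include <string.h>

  long sum_dynamic(int n) {
    long buf[n];                            /* VLA: size known only at run time */
    memset(buf, 0, (size_t)n * sizeof(long));
    long s = 0;
    for (int i = 0; i < n; ++i)
      s += buf[i];
    return s;
  }
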
+ +let Uses = [SX11], hasSideEffects = 1 in +def GETSTACKTOP : Pseudo<(outs I64:$dst), (ins), + "# GET STACK TOP", + [(set iPTR:$dst, (GetStackTop))]>; + +// SETCC pattern matches +// +// CMP %tmp, lhs, rhs ; compare lhs and rhs +// or %res, 0, (0)1 ; initialize by 0 +// CMOV %res, (63)0, %tmp ; set 1 if %tmp is true + +def : Pat<(i32 (setcc i64:$LHS, i64:$RHS, CCSIOp:$cond)), + (EXTRACT_SUBREG + (CMOVLrm0 (icond2cc $cond), + (CPXrr i64:$LHS, i64:$RHS), + 63, + (ORim1 0, 0)), sub_i32)>; + +def : Pat<(i32 (setcc i64:$LHS, i64:$RHS, CCUIOp:$cond)), + (EXTRACT_SUBREG + (CMOVLrm0 (icond2cc $cond), + (CMPrr i64:$LHS, i64:$RHS), + 63, + (ORim1 0, 0)), sub_i32)>; + +def : Pat<(i32 (setcc i32:$LHS, i32:$RHS, CCSIOp:$cond)), + (EXTRACT_SUBREG + (CMOVWrm0 (icond2cc $cond), + (CPSrr i32:$LHS, i32:$RHS), + 63, + (ORim1 0, 0)), sub_i32)>; + +def : Pat<(i32 (setcc i32:$LHS, i32:$RHS, CCUIOp:$cond)), + (EXTRACT_SUBREG + (CMOVWrm0 (icond2cc $cond), + (CMPUWrr i32:$LHS, i32:$RHS), + 63, + (ORim1 0, 0)), sub_i32)>; + +def : Pat<(i32 (setcc f128:$LHS, f128:$RHS, cond:$cond)), + (EXTRACT_SUBREG + (CMOVDrm0 (fcond2cc $cond), + (FCQrr f128:$LHS, f128:$RHS), + 63, + (ORim1 0, 0)), sub_i32)>; + +def : Pat<(i32 (setcc f64:$LHS, f64:$RHS, cond:$cond)), + (EXTRACT_SUBREG + (CMOVDrm0 (fcond2cc $cond), + (FCPrr f64:$LHS, f64:$RHS), + 63, + (ORim1 0, 0)), sub_i32)>; + +def : Pat<(i32 (setcc f32:$LHS, f32:$RHS, cond:$cond)), + (EXTRACT_SUBREG + (CMOVSrm0 (fcond2cc $cond), + (FCPSrr f32:$LHS, f32:$RHS), + 63, + (ORim1 0, 0)), sub_i32)>; + +// Special SELECTCC pattern matches +// Use min/max for better performance. +// +// MAX/MIN %res, %lhs, %rhs + +def : Pat<(f64 (selectcc f64:$LHS, f64:$RHS, f64:$LHS, f64:$RHS, SETOGT)), + (FCMArr $LHS, $RHS)>; +def : Pat<(f32 (selectcc f32:$LHS, f32:$RHS, f32:$LHS, f32:$RHS, SETOGT)), + (FCMASrr $LHS, $RHS)>; +def : Pat<(i64 (selectcc i64:$LHS, i64:$RHS, i64:$LHS, i64:$RHS, SETGT)), + (CMXarr $LHS, $RHS)>; +def : Pat<(i32 (selectcc i32:$LHS, i32:$RHS, i32:$LHS, i32:$RHS, SETGT)), + (CMSarr $LHS, $RHS)>; +def : Pat<(f64 (selectcc f64:$LHS, f64:$RHS, f64:$LHS, f64:$RHS, SETOGE)), + (FCMArr $LHS, $RHS)>; +def : Pat<(f32 (selectcc f32:$LHS, f32:$RHS, f32:$LHS, f32:$RHS, SETOGE)), + (FCMASrr $LHS, $RHS)>; +def : Pat<(i64 (selectcc i64:$LHS, i64:$RHS, i64:$LHS, i64:$RHS, SETGE)), + (CMXarr $LHS, $RHS)>; +def : Pat<(i32 (selectcc i32:$LHS, i32:$RHS, i32:$LHS, i32:$RHS, SETGE)), + (CMSarr $LHS, $RHS)>; + +def : Pat<(f64 (selectcc f64:$LHS, f64:$RHS, f64:$LHS, f64:$RHS, SETOLT)), + (FCMIrr $LHS, $RHS)>; +def : Pat<(f32 (selectcc f32:$LHS, f32:$RHS, f32:$LHS, f32:$RHS, SETOLT)), + (FCMISrr $LHS, $RHS)>; +def : Pat<(i64 (selectcc i64:$LHS, i64:$RHS, i64:$LHS, i64:$RHS, SETLT)), + (CMXirr $LHS, $RHS)>; +def : Pat<(i32 (selectcc i32:$LHS, i32:$RHS, i32:$LHS, i32:$RHS, SETLT)), + (CMSirr $LHS, $RHS)>; +def : Pat<(f64 (selectcc f64:$LHS, f64:$RHS, f64:$LHS, f64:$RHS, SETOLE)), + (FCMIrr $LHS, $RHS)>; +def : Pat<(f32 (selectcc f32:$LHS, f32:$RHS, f32:$LHS, f32:$RHS, SETOLE)), + (FCMISrr $LHS, $RHS)>; +def : Pat<(i64 (selectcc i64:$LHS, i64:$RHS, i64:$LHS, i64:$RHS, SETLE)), + (CMXirr $LHS, $RHS)>; +def : Pat<(i32 (selectcc i32:$LHS, i32:$RHS, i32:$LHS, i32:$RHS, SETLE)), + (CMSirr $LHS, $RHS)>; + +// Generic SELECTCC pattern matches +// +// CMP %tmp, %l, %r ; compare %l and %r +// or %res, %f, (0)1 ; initialize by %f +// CMOV %res, %t, %tmp ; set %t if %tmp is true + +// selectcc for i64 result +def : Pat<(i64 (selectcc i32:$l, i32:$r, i64:$t, i64:$f, CCSIOp:$cond)), + (CMOVWrr (icond2cc $cond), 
(CPSrr $l, $r), $t, $f)>; +def : Pat<(i64 (selectcc i32:$l, i32:$r, i64:$t, i64:$f, CCUIOp:$cond)), + (CMOVWrr (icond2cc $cond), (CMPUWrr $l, $r), $t, $f)>; +def : Pat<(i64 (selectcc i64:$l, i64:$r, i64:$t, i64:$f, CCSIOp:$cond)), + (CMOVLrr (icond2cc $cond), (CPXrr $l, $r), $t, $f)>; +def : Pat<(i64 (selectcc i64:$l, i64:$r, i64:$t, i64:$f, CCUIOp:$cond)), + (CMOVLrr (icond2cc $cond), (CMPrr $l, $r), $t, $f)>; +def : Pat<(i64 (selectcc f32:$l, f32:$r, i64:$t, i64:$f, cond:$cond)), + (CMOVSrr (fcond2cc $cond), (FCPSrr $l, $r), $t, $f)>; +def : Pat<(i64 (selectcc f64:$l, f64:$r, i64:$t, i64:$f, cond:$cond)), + (CMOVDrr (fcond2cc $cond), (FCPrr $l, $r), $t, $f)>; +def : Pat<(i64 (selectcc f128:$l, f128:$r, i64:$t, i64:$f, cond:$cond)), + (CMOVDrr (fcond2cc $cond), (FCQrr $l, $r), $t, $f)>; + +// selectcc for i32 result +def : Pat<(i32 (selectcc i32:$l, i32:$r, i32:$t, i32:$f, CCSIOp:$cond)), + (EXTRACT_SUBREG + (CMOVWrr (icond2cc $cond), + (CPSrr $l, $r), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; +def : Pat<(i32 (selectcc i32:$l, i32:$r, i32:$t, i32:$f, CCUIOp:$cond)), + (EXTRACT_SUBREG + (CMOVWrr (icond2cc $cond), + (CMPUWrr $l, $r), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; +def : Pat<(i32 (selectcc i64:$l, i64:$r, i32:$t, i32:$f, CCSIOp:$cond)), + (EXTRACT_SUBREG + (CMOVLrr (icond2cc $cond), + (CPXrr $l, $r), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; +def : Pat<(i32 (selectcc i64:$l, i64:$r, i32:$t, i32:$f, CCUIOp:$cond)), + (EXTRACT_SUBREG + (CMOVLrr (icond2cc $cond), + (CMPrr $l, $r), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; +def : Pat<(i32 (selectcc f32:$l, f32:$r, i32:$t, i32:$f, cond:$cond)), + (EXTRACT_SUBREG + (CMOVSrr (fcond2cc $cond), + (FCPSrr $l, $r), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; +def : Pat<(i32 (selectcc f64:$l, f64:$r, i32:$t, i32:$f, cond:$cond)), + (EXTRACT_SUBREG + (CMOVDrr (fcond2cc $cond), + (FCPrr $l, $r), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; +def : Pat<(i32 (selectcc f128:$l, f128:$r, i32:$t, i32:$f, cond:$cond)), + (EXTRACT_SUBREG + (CMOVDrr (fcond2cc $cond), + (FCQrr $l, $r), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; + +// selectcc for f128 result +def : Pat<(f128 (selectcc i32:$l, i32:$r, f128:$t, f128:$f, CCSIOp:$cond)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + (CMOVWrr (icond2cc $cond), (CPSrr $l, $r), + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVWrr (icond2cc $cond), (CPSrr $l, $r), + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; +def : Pat<(f128 (selectcc i32:$l, i32:$r, f128:$t, f128:$f, CCUIOp:$cond)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + (CMOVWrr (icond2cc $cond), (CMPUWrr $l, $r), + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVWrr (icond2cc $cond), (CMPUWrr $l, $r), + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; +def : Pat<(f128 (selectcc i64:$l, i64:$r, f128:$t, f128:$f, CCSIOp:$cond)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 
(IMPLICIT_DEF)), + (CMOVLrr (icond2cc $cond), (CPXrr $l, $r), + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVLrr (icond2cc $cond), (CPXrr $l, $r), + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; +def : Pat<(f128 (selectcc i64:$l, i64:$r, f128:$t, f128:$f, CCUIOp:$cond)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + (CMOVLrr (icond2cc $cond), (CMPrr $l, $r), + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVLrr (icond2cc $cond), (CMPrr $l, $r), + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; +def : Pat<(f128 (selectcc f32:$l, f32:$r, f128:$t, f128:$f, cond:$cond)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + (CMOVSrr (fcond2cc $cond), (FCPSrr $l, $r), + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVSrr (fcond2cc $cond), (FCPSrr $l, $r), + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; +def : Pat<(f128 (selectcc f64:$l, f64:$r, f128:$t, f128:$f, cond:$cond)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + (CMOVDrr (fcond2cc $cond), (FCPrr $l, $r), + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVDrr (fcond2cc $cond), (FCPrr $l, $r), + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; +def : Pat<(f128 (selectcc f128:$l, f128:$r, f128:$t, f128:$f, cond:$cond)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + (CMOVDrr (fcond2cc $cond), (FCQrr $l, $r), + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVDrr (fcond2cc $cond), (FCQrr $l, $r), + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; + +// selectcc for f64 result +def : Pat<(f64 (selectcc i32:$l, i32:$r, f64:$t, f64:$f, CCSIOp:$cond)), + (CMOVWrr (icond2cc $cond), (CPSrr $l, $r), $t, $f)>; +def : Pat<(f64 (selectcc i32:$l, i32:$r, f64:$t, f64:$f, CCUIOp:$cond)), + (CMOVWrr (icond2cc $cond), (CMPUWrr $l, $r), $t, $f)>; +def : Pat<(f64 (selectcc i64:$l, i64:$r, f64:$t, f64:$f, CCSIOp:$cond)), + (CMOVLrr (icond2cc $cond), (CPXrr $l, $r), $t, $f)>; +def : Pat<(f64 (selectcc i64:$l, i64:$r, f64:$t, f64:$f, CCUIOp:$cond)), + (CMOVLrr (icond2cc $cond), (CMPrr $l, $r), $t, $f)>; +def : Pat<(f64 (selectcc f32:$l, f32:$r, f64:$t, f64:$f, cond:$cond)), + (CMOVSrr (fcond2cc $cond), (FCPSrr $l, $r), $t, $f)>; +def : Pat<(f64 (selectcc f64:$l, f64:$r, f64:$t, f64:$f, cond:$cond)), + (CMOVDrr (fcond2cc $cond), (FCPrr $l, $r), $t, $f)>; +def : Pat<(f64 (selectcc f128:$l, f128:$r, f64:$t, f64:$f, cond:$cond)), + (CMOVDrr (fcond2cc $cond), (FCQrr $l, $r), $t, $f)>; + +// selectcc for f32 result +def : Pat<(f32 (selectcc i32:$l, i32:$r, f32:$t, f32:$f, CCSIOp:$cond)), + (EXTRACT_SUBREG + (CMOVWrr (icond2cc $cond), + (CPSrr $l, $r), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; +def : Pat<(f32 (selectcc i32:$l, i32:$r, f32:$t, f32:$f, CCUIOp:$cond)), + (EXTRACT_SUBREG + (CMOVWrr (icond2cc $cond), + (CMPUWrr $l, $r), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; +def : Pat<(f32 (selectcc i64:$l, i64:$r, f32:$t, f32:$f, CCSIOp:$cond)), + (EXTRACT_SUBREG + (CMOVLrr (icond2cc $cond), + (CPXrr $l, $r), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; +def : Pat<(f32 (selectcc i64:$l, i64:$r, 
f32:$t, f32:$f, CCUIOp:$cond)), + (EXTRACT_SUBREG + (CMOVLrr (icond2cc $cond), + (CMPrr $l, $r), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; +def : Pat<(f32 (selectcc f32:$l, f32:$r, f32:$t, f32:$f, cond:$cond)), + (EXTRACT_SUBREG + (CMOVSrr (fcond2cc $cond), + (FCPSrr $l, $r), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; +def : Pat<(f32 (selectcc f64:$l, f64:$r, f32:$t, f32:$f, cond:$cond)), + (EXTRACT_SUBREG + (CMOVDrr (fcond2cc $cond), + (FCPrr $l, $r), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; +def : Pat<(f32 (selectcc f128:$l, f128:$r, f32:$t, f32:$f, cond:$cond)), + (EXTRACT_SUBREG + (CMOVDrr (fcond2cc $cond), + (FCQrr $l, $r), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; + +// Generic SELECT pattern matches +// Use cmov.w for all cases since %pred holds i32. +// +// CMOV.w.ne %res, %tval, %tmp ; set tval if %tmp is true + +def : Pat<(i64 (select i32:$pred, i64:$t, i64:$f)), + (CMOVWrr CC_INE, $pred, $t, $f)>; + +def : Pat<(i32 (select i32:$pred, i32:$t, i32:$f)), + (EXTRACT_SUBREG + (CMOVWrr CC_INE, $pred, + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_i32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_i32)), + sub_i32)>; + +def : Pat<(f128 (select i32:$pred, f128:$t, f128:$f)), + (INSERT_SUBREG + (INSERT_SUBREG (f128 (IMPLICIT_DEF)), + (CMOVWrr CC_INE, $pred, + (EXTRACT_SUBREG $t, sub_odd), + (EXTRACT_SUBREG $f, sub_odd)), sub_odd), + (CMOVWrr CC_INE, $pred, + (EXTRACT_SUBREG $t, sub_even), + (EXTRACT_SUBREG $f, sub_even)), sub_even)>; + +def : Pat<(f64 (select i32:$pred, f64:$t, f64:$f)), + (CMOVWrr CC_INE, $pred, $t, $f)>; + +def : Pat<(f32 (select i32:$pred, f32:$t, f32:$f)), + (EXTRACT_SUBREG + (CMOVWrr CC_INE, $pred, + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $t, sub_f32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $f, sub_f32)), + sub_f32)>; + +// bitconvert +def : Pat<(f64 (bitconvert i64:$src)), (COPY_TO_REGCLASS $src, I64)>; +def : Pat<(i64 (bitconvert f64:$src)), (COPY_TO_REGCLASS $src, I64)>; + +def : Pat<(i32 (bitconvert f32:$op)), + (EXTRACT_SUBREG (SRAXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + $op, sub_f32), 32), sub_i32)>; +def : Pat<(f32 (bitconvert i32:$op)), + (EXTRACT_SUBREG (SLLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + $op, sub_i32), 32), sub_f32)>; + +// Bits operations pattern matchings. 
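Before the bit-manipulation patterns that follow, a short scalar model of the f32/i32 bitconvert patterns just above may help: they rely on an f32 value living in the upper 32 bits of its 64-bit register (sub_f32) while an i32 lives in the lower 32 bits (sub_i32), so a bitcast is a 32-bit shift of the container. The helper names below are hypothetical; only the shift-by-32 layout is taken from the patterns. The patterns that follow use the same widen-to-i64 idiom (INSERT_SUBREG/EXTRACT_SUBREG) for ctpop, bitreverse, ctlz, and bswap on i32.

  /* Hedged scalar model of the bitconvert patterns above. */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Models SRAXri/SRLri ..., 32 followed by EXTRACT_SUBREG sub_i32;
   * only the low 32 bits of the shifted value matter. */
  static uint32_t f32_bits_from_container(uint64_t reg) {
    return (uint32_t)(reg >> 32);
  }

  /* Models INSERT_SUBREG sub_i32 followed by SLLri ..., 32. */
  static uint64_t container_from_f32_bits(uint32_t bits) {
    return (uint64_t)bits << 32;
  }

  int main(void) {
    float f = 1.5f;
    uint32_t raw;
    memcpy(&raw, &f, sizeof raw);          /* the i32 view of the float's bits */
    uint64_t reg = container_from_f32_bits(raw);
    printf("%08x -> %016llx -> %08x\n", (unsigned)raw,
           (unsigned long long)reg, (unsigned)f32_bits_from_container(reg));
    return 0;
  }
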
+def : Pat<(i32 (ctpop i32:$src)), + (EXTRACT_SUBREG (PCNTr (ANDrm0 (INSERT_SUBREG + (i64 (IMPLICIT_DEF)), $src, sub_i32), 32)), sub_i32)>; +def : Pat<(i32 (bitreverse i32:$src)), + (EXTRACT_SUBREG (SRLri (BRVr (INSERT_SUBREG + (i64 (IMPLICIT_DEF)), $src, sub_i32)), 32), sub_i32)>; +def : Pat<(i32 (ctlz i32:$src)), + (EXTRACT_SUBREG (LDZr (SLLri (INSERT_SUBREG + (i64 (IMPLICIT_DEF)), $src, sub_i32), 32)), sub_i32)>; +def : Pat<(i64 (bswap i64:$src)), + (BSWPri $src, 0)>; +def : Pat<(i32 (bswap i32:$src)), + (EXTRACT_SUBREG (BSWPri (INSERT_SUBREG + (i64 (IMPLICIT_DEF)), $src, sub_i32), 1), sub_i32)>; + +// Several special pattern matches to optimize code + +def : Pat<(i32 (and i32:$lhs, 0xff)), + (AND32rm0 $lhs, 56)>; +def : Pat<(i32 (and i32:$lhs, 0xffff)), + (AND32rm0 $lhs, 48)>; +def : Pat<(i32 (and i32:$lhs, 0xffffffff)), + (AND32rm0 $lhs, 32)>; + +// vector instructions +include "VEInstrVec.td" +include "VEInstrVecVL.td" + +// generic vector instruction patterns +include "VEInstrPatternsVec.td" + +// vevlintrin +include "VEInstrIntrinsicVL.td" diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.gen.td @@ -0,0 +1,1699 @@ +def : Pat<(int_ve_vl_vld_vssl i64:$sy, i64:$sz, i32:$vl), (vld_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vld_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vld_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vld_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vld_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vld_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vld_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldnc_vssl i64:$sy, i64:$sz, i32:$vl), (vldnc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldnc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldnc_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldnc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldnc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldu_vssl i64:$sy, i64:$sz, i32:$vl), (vldu_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldu_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldu_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldu_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldu_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldu_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldu_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldunc_vssl i64:$sy, i64:$sz, i32:$vl), (vldunc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldunc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldunc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldunc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldunc_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldunc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldunc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsx_vssl i64:$sy, i64:$sz, i32:$vl), (vldlsx_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldlsx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsx_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldlsx_vIsl (i64 
simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsx_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldlsx_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsxnc_vssl i64:$sy, i64:$sz, i32:$vl), (vldlsxnc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldlsxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsxnc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldlsxnc_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlsxnc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldlsxnc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzx_vssl i64:$sy, i64:$sz, i32:$vl), (vldlzx_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldlzx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzx_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldlzx_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzx_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldlzx_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzxnc_vssl i64:$sy, i64:$sz, i32:$vl), (vldlzxnc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldlzxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzxnc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldlzxnc_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldlzxnc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldlzxnc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vld2d_vssl i64:$sy, i64:$sz, i32:$vl), (vld2d_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vld2d_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vld2d_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vld2d_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vld2d_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vld2d_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vld2d_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vld2dnc_vssl i64:$sy, i64:$sz, i32:$vl), (vld2dnc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vld2dnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vld2dnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vld2dnc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vld2dnc_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vld2dnc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vld2dnc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2d_vssl i64:$sy, i64:$sz, i32:$vl), (vldu2d_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2d_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldu2d_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2d_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldu2d_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2d_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldu2d_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2dnc_vssl i64:$sy, i64:$sz, i32:$vl), (vldu2dnc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2dnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldu2dnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2dnc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldu2dnc_vIsl (i64 simm7:$I), 
i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldu2dnc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldu2dnc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsx_vssl i64:$sy, i64:$sz, i32:$vl), (vldl2dsx_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldl2dsx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsx_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldl2dsx_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsx_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldl2dsx_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsxnc_vssl i64:$sy, i64:$sz, i32:$vl), (vldl2dsxnc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldl2dsxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsxnc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldl2dsxnc_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dsxnc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldl2dsxnc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzx_vssl i64:$sy, i64:$sz, i32:$vl), (vldl2dzx_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldl2dzx_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzx_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldl2dzx_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzx_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldl2dzx_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzxnc_vssl i64:$sy, i64:$sz, i32:$vl), (vldl2dzxnc_vssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vldl2dzxnc_vssvl i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzxnc_vssl (i64 simm7:$I), i64:$sz, i32:$vl), (vldl2dzxnc_vIsl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vldl2dzxnc_vssvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vldl2dzxnc_vIsvl (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vst_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vst_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vst_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vst_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vst_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstnc_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstnc_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstnc_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstnc_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; 
+def : Pat<(int_ve_vl_vstot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstncot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstncot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstncot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstncot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstu_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstu_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstu_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstu_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstunc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstunc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstunc_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstunc_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstunc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstunc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstunc_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstunc_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstuot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstuot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstuot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstuot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstuot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstuot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstuot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstuot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstuncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstuncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstuncot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstuncot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstuncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstuncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstuncot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstuncot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstl_vssl v256f64:$vx, i64:$sy, 
i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstl_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstl_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstl_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstlnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstlnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstlnc_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstlnc_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstlnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstlnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstlnc_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstlnc_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstlot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstlot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstlot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstlot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstlot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstlot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstlot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstlot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstlncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstlncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstlncot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstlncot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstlncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstlncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstlncot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstlncot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2d_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vst2d_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2d_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vst2d_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2d_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vst2d_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2d_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vst2d_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vst2dnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dnc_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vst2dnc_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vst2dnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dnc_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vst2dnc_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), 
(vst2dot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vst2dot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vst2dot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vst2dot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vst2dncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dncot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vst2dncot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vst2dncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vst2dncot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vst2dncot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2d_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstu2d_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2d_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstu2d_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2d_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstu2d_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2d_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstu2d_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstu2dnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dnc_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstu2dnc_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstu2dnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dnc_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstu2dnc_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstu2dot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstu2dot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstu2dot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstu2dot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstu2dncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dncot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstu2dncot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstu2dncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstu2dncot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstu2dncot_vIsml v256f64:$vx, (i64 
simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2d_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstl2d_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2d_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstl2d_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2d_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstl2d_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2d_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstl2d_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstl2dnc_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dnc_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstl2dnc_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstl2dnc_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dnc_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstl2dnc_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstl2dot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstl2dot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstl2dot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstl2dot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl), (vstl2dncot_vssl v256f64:$vx, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dncot_vssl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl), (vstl2dncot_vIsl v256f64:$vx, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vstl2dncot_vssml v256f64:$vx, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vstl2dncot_vssml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vstl2dncot_vIsml v256f64:$vx, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pfchv_ssl i64:$sy, i64:$sz, i32:$vl), (pfchv_ssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_pfchv_ssl (i64 simm7:$I), i64:$sz, i32:$vl), (pfchv_Isl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_pfchvnc_ssl i64:$sy, i64:$sz, i32:$vl), (pfchvnc_ssl i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_pfchvnc_ssl (i64 simm7:$I), i64:$sz, i32:$vl), (pfchvnc_Isl (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_lsv_vvss v256f64:$vd, i32:$sy, i64:$sz), (lsv_vvss v256f64:$vd, i32:$sy, i64:$sz)>; +def : Pat<(int_ve_vl_lvsl_svs v256f64:$vx, i32:$sy), (lvsl_svs v256f64:$vx, i32:$sy)>; +def : Pat<(int_ve_vl_lvsd_svs v256f64:$vx, i32:$sy), (lvsl_svs v256f64:$vx, i32:$sy)>; +def : Pat<(int_ve_vl_lvss_svs v256f64:$vx, i32:$sy), (lvss_svs v256f64:$vx, i32:$sy)>; +def : Pat<(int_ve_vl_lvm_mmss v4i64:$vmd, i64:$sy, i64:$sz), (lvm_mmss v4i64:$vmd, i64:$sy, i64:$sz)>; +def : Pat<(int_ve_vl_lvm_mmss v4i64:$vmd, (i64 uimm6:$N), i64:$sz), (lvm_mmIs v4i64:$vmd, (i64 uimm6:$N), i64:$sz)>; +def : Pat<(int_ve_vl_lvm_MMss 
v8i64:$vmd, i64:$sy, i64:$sz), (lvm_MMss v8i64:$vmd, i64:$sy, i64:$sz)>; +def : Pat<(int_ve_vl_lvm_MMss v8i64:$vmd, (i64 uimm6:$N), i64:$sz), (lvm_MMIs v8i64:$vmd, (i64 uimm6:$N), i64:$sz)>; +def : Pat<(int_ve_vl_svm_sms v4i64:$vmz, i64:$sy), (svm_sms v4i64:$vmz, i64:$sy)>; +def : Pat<(int_ve_vl_svm_sms v4i64:$vmz, (i64 uimm6:$N)), (svm_smI v4i64:$vmz, (i64 uimm6:$N))>; +def : Pat<(int_ve_vl_svm_sMs v8i64:$vmz, i64:$sy), (svm_sMs v8i64:$vmz, i64:$sy)>; +def : Pat<(int_ve_vl_svm_sMs v8i64:$vmz, (i64 uimm6:$N)), (svm_sMI v8i64:$vmz, (i64 uimm6:$N))>; +def : Pat<(int_ve_vl_vbrdd_vsl f64:$sy, i32:$vl), (vbrd_vsl f64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdd_vsvl f64:$sy, v256f64:$vd, i32:$vl), (vbrd_vsvl f64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdd_vsmvl f64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vbrd_vsmvl f64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdl_vsl i64:$sy, i32:$vl), (vbrd_vsl i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdl_vsvl i64:$sy, v256f64:$vd, i32:$vl), (vbrd_vsvl i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdl_vsmvl i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vbrd_vsmvl i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdl_vsl (i64 simm7:$I), i32:$vl), (vbrd_vIl (i64 simm7:$I), i32:$vl)>; +def : Pat<(int_ve_vl_vbrdl_vsvl (i64 simm7:$I), v256f64:$vd, i32:$vl), (vbrd_vIvl (i64 simm7:$I), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdl_vsmvl (i64 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl), (vbrd_vImvl (i64 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrds_vsl f32:$sy, i32:$vl), (vbrdu_vsl f32:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vbrds_vsvl f32:$sy, v256f64:$vd, i32:$vl), (vbrdu_vsvl f32:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrds_vsmvl f32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vbrdu_vsmvl f32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdw_vsl i32:$sy, i32:$vl), (vbrdl_vsl i32:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdw_vsvl i32:$sy, v256f64:$vd, i32:$vl), (vbrdl_vsvl i32:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdw_vsmvl i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vbrdl_vsmvl i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdw_vsl (i32 simm7:$I), i32:$vl), (vbrdl_vIl (i32 simm7:$I), i32:$vl)>; +def : Pat<(int_ve_vl_vbrdw_vsvl (i32 simm7:$I), v256f64:$vd, i32:$vl), (vbrdl_vIvl (i32 simm7:$I), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vbrdw_vsmvl (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl), (vbrdl_vImvl (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvbrd_vsl i64:$sy, i32:$vl), (pvbrd_vsl i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvbrd_vsvl i64:$sy, v256f64:$vd, i32:$vl), (pvbrd_vsvl i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvbrd_vsMvl i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvbrd_vsMvl i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmv_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vmv_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmv_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmv_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmv_vsvl (i32 uimm7:$N), v256f64:$vz, i32:$vl), (vmv_vIvl (i32 uimm7:$N), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmv_vsvvl (i32 uimm7:$N), v256f64:$vz, v256f64:$vd, i32:$vl), (vmv_vIvvl (i32 uimm7:$N), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmv_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmv_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, 
v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmv_vsvmvl (i32 uimm7:$N), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmv_vIvmvl (i32 uimm7:$N), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vaddul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vaddul_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vaddul_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vaddul_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddul_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddul_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vadduw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vadduw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vadduw_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vadduw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vadduw_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vadduw_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vadduw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vadduw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vadduw_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vadduw_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvaddu_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvaddu_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvaddu_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvaddu_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvaddu_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvaddu_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvaddu_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvaddu_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvaddu_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvaddu_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvaddu_vsvMvl 
i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvaddu_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vaddswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vaddswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vaddswsx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vaddswsx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswsx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddswsx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vaddswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vaddswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vaddswzx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vaddswzx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddswzx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddswzx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvadds_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvadds_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvadds_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvadds_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvadds_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvadds_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvadds_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvadds_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvadds_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvadds_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : 
Pat<(int_ve_vl_pvadds_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvadds_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vaddsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vaddsl_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vaddsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vaddsl_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vaddsl_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vaddsl_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vaddsl_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vsubul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vsubul_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vsubul_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vsubul_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubul_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubul_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vsubuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vsubuw_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vsubuw_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vsubuw_vIvvl (i32 simm7:$I), 
v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubuw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubuw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubuw_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubuw_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubu_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvsubu_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubu_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvsubu_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubu_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvsubu_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubu_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvsubu_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubu_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsubu_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubu_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsubu_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vsubswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vsubswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vsubswsx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vsubswsx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswsx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubswsx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vsubswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vsubswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vsubswzx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vsubswzx_vIvvl (i32 simm7:$I), 
v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubswzx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubswzx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvsubs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvsubs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubs_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvsubs_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubs_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvsubs_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubs_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsubs_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsubs_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsubs_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vsubsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vsubsl_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vsubsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vsubsl_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vsubsl_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsubsl_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsubsl_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmulul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vmulul_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vmulul_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmulul_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : 
Pat<(int_ve_vl_vmulul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulul_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulul_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmuluw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmuluw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vmuluw_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmuluw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vmuluw_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmuluw_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmuluw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmuluw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmuluw_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmuluw_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmulswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vmulswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vmulswsx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmulswsx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswsx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulswsx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmulswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vmulswzx_vsvl i32:$sy, v256f64:$vz, 
i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vmulswzx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmulswzx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulswzx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulswzx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmulsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vmulsl_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vmulsl_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmulsl_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulsl_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmulsl_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulslw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmulslw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulslw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulslw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulslw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vmulslw_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulslw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmulslw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmulslw_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vmulslw_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmulslw_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmulslw_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vdivul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vdivul_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), 
(vdivul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vdivul_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vdivul_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivul_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vdivuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vdivuw_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vdivuw_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vdivuw_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivuw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivuw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivuw_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvsl v256f64:$vy, i64:$sy, i32:$vl), (vdivul_vvsl v256f64:$vy, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvsvl v256f64:$vy, i64:$sy, v256f64:$vd, i32:$vl), (vdivul_vvsvl v256f64:$vy, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvsl v256f64:$vy, (i64 simm7:$I), i32:$vl), (vdivul_vvIl v256f64:$vy, (i64 simm7:$I), i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvsvl v256f64:$vy, (i64 simm7:$I), v256f64:$vd, i32:$vl), (vdivul_vvIvl v256f64:$vy, (i64 simm7:$I), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvsmvl v256f64:$vy, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivul_vvsmvl v256f64:$vy, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivul_vvsmvl v256f64:$vy, (i64 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl), (vdivul_vvImvl v256f64:$vy, (i64 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvsl v256f64:$vy, i32:$sy, i32:$vl), (vdivuw_vvsl v256f64:$vy, i32:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvsvl v256f64:$vy, i32:$sy, v256f64:$vd, i32:$vl), (vdivuw_vvsvl v256f64:$vy, i32:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvsl v256f64:$vy, (i32 simm7:$I), i32:$vl), (vdivuw_vvIl v256f64:$vy, (i32 simm7:$I), i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvsvl v256f64:$vy, (i32 simm7:$I), v256f64:$vd, i32:$vl), (vdivuw_vvIvl v256f64:$vy, (i32 simm7:$I), 
v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvsmvl v256f64:$vy, i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivuw_vvsmvl v256f64:$vy, i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivuw_vvsmvl v256f64:$vy, (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl), (vdivuw_vvImvl v256f64:$vy, (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vdivswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vdivswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vdivswsx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vdivswsx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswsx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vdivswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vdivswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vdivswzx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vdivswzx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswzx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvsl v256f64:$vy, i32:$sy, i32:$vl), (vdivswsx_vvsl v256f64:$vy, i32:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvsvl v256f64:$vy, i32:$sy, v256f64:$vd, i32:$vl), (vdivswsx_vvsvl v256f64:$vy, i32:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvsl v256f64:$vy, (i32 simm7:$I), i32:$vl), (vdivswsx_vvIl v256f64:$vy, (i32 simm7:$I), i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvsvl v256f64:$vy, (i32 simm7:$I), v256f64:$vd, i32:$vl), 
(vdivswsx_vvIvl v256f64:$vy, (i32 simm7:$I), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvsmvl v256f64:$vy, i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswsx_vvsmvl v256f64:$vy, i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswsx_vvsmvl v256f64:$vy, (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswsx_vvImvl v256f64:$vy, (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvsl v256f64:$vy, i32:$sy, i32:$vl), (vdivswzx_vvsl v256f64:$vy, i32:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvsvl v256f64:$vy, i32:$sy, v256f64:$vd, i32:$vl), (vdivswzx_vvsvl v256f64:$vy, i32:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvsl v256f64:$vy, (i32 simm7:$I), i32:$vl), (vdivswzx_vvIl v256f64:$vy, (i32 simm7:$I), i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvsvl v256f64:$vy, (i32 simm7:$I), v256f64:$vd, i32:$vl), (vdivswzx_vvIvl v256f64:$vy, (i32 simm7:$I), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvsmvl v256f64:$vy, i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswzx_vvsmvl v256f64:$vy, i32:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivswzx_vvsmvl v256f64:$vy, (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl), (vdivswzx_vvImvl v256f64:$vy, (i32 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vdivsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vdivsl_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vdivsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vdivsl_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vdivsl_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivsl_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvsl v256f64:$vy, i64:$sy, i32:$vl), (vdivsl_vvsl v256f64:$vy, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvsvl v256f64:$vy, i64:$sy, v256f64:$vd, i32:$vl), (vdivsl_vvsvl v256f64:$vy, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvsl v256f64:$vy, (i64 simm7:$I), i32:$vl), (vdivsl_vvIl v256f64:$vy, (i64 simm7:$I), i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvsvl v256f64:$vy, (i64 simm7:$I), v256f64:$vd, i32:$vl), (vdivsl_vvIvl v256f64:$vy, (i64 simm7:$I), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvsmvl v256f64:$vy, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vdivsl_vvsmvl v256f64:$vy, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vdivsl_vvsmvl v256f64:$vy, (i64 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl), (vdivsl_vvImvl v256f64:$vy, (i64 simm7:$I), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vvvl v256f64:$vy, 
v256f64:$vz, i32:$vl), (vcmpul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vcmpul_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vcmpul_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpul_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpul_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpul_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpul_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpul_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vcmpuw_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpuw_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vcmpuw_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpuw_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vcmpuw_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpuw_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpuw_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpuw_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpuw_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpuw_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmpu_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvcmpu_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmpu_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvcmpu_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmpu_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvcmpu_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmpu_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvcmpu_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmpu_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvcmpu_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmpu_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvcmpu_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vcmpswsx_vvvl v256f64:$vy, v256f64:$vz, 
i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vcmpswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vcmpswsx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpswsx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswsx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpswsx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vcmpswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vcmpswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vcmpswzx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpswzx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpswzx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpswzx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmps_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvcmps_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmps_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvcmps_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmps_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvcmps_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmps_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvcmps_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmps_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvcmps_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcmps_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvcmps_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vcmpsl_vvvl v256f64:$vy, v256f64:$vz, 
i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vcmpsl_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vcmpsl_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vcmpsl_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcmpsl_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcmpsl_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmaxswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vmaxswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vmaxswsx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxswsx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswsx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxswsx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmaxswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vmaxswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vmaxswzx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxswzx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; 
+def : Pat<(int_ve_vl_vmaxswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxswzx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxswzx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmaxs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvmaxs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvmaxs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvmaxs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmaxs_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvmaxs_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvmaxs_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvmaxs_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmaxs_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvmaxs_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmaxs_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvmaxs_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vminswsx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vminswsx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vminswsx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vminswsx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vminswsx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vminswsx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminswsx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminswsx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswsx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminswsx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vminswzx_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vminswzx_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl), (vminswzx_vsvl i32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vminswzx_vsvvl i32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vsvl (i32 simm7:$I), v256f64:$vz, i32:$vl), (vminswzx_vIvl (i32 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vsvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vminswzx_vIvvl (i32 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminswzx_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, 
i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminswzx_vsvmvl i32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminswzx_vsvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminswzx_vIvmvl (i32 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmins_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvmins_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvmins_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvmins_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmins_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvmins_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvmins_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvmins_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmins_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvmins_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvmins_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvmins_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vmaxsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vmaxsl_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vmaxsl_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vmaxsl_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmaxsl_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmaxsl_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vminsl_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vminsl_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vminsl_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vminsl_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vsvl (i64 simm7:$I), v256f64:$vz, i32:$vl), (vminsl_vIvl (i64 simm7:$I), v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vsvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl), (vminsl_vIvvl (i64 simm7:$I), v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminsl_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vsvmvl i64:$sy, 
v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminsl_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vminsl_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vminsl_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vand_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vand_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vand_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vand_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vand_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vand_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vand_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vand_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vand_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vand_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vand_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vand_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvandlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvandlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvandlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandlo_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvandlo_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvandlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvandlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvandlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvandlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvandup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvandup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvandup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandup_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvandup_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvandup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvandup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvandup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvandup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvandup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvand_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvand_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvand_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvand_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvand_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvand_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvand_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvand_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvand_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvand_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : 
Pat<(int_ve_vl_pvand_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvand_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vor_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vor_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vor_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vor_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vor_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvorlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvorlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvorlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorlo_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvorlo_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvorlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvorlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvorlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvorlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvorup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvorup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvorup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorup_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvorup_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvorup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvorup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvorup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvorup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvorup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvor_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvor_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvor_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvor_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvor_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vxor_vvvl 
v256f64:$vy, v256f64:$vz, i32:$vl), (vxor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vxor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vxor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vxor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (vxor_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vxor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vxor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vxor_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vxor_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vxor_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vxor_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvxorlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvxorlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorlo_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvxorlo_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvxorlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvxorlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvxorlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvxorup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvxorup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorup_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvxorup_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvxorup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvxorup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxorup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvxorup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvxor_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvxor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvxor_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxor_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvxor_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvxor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvxor_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxor_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvxor_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvxor_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvxor_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_veqv_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (veqv_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_veqv_vvvvl v256f64:$vy, v256f64:$vz, 
v256f64:$vd, i32:$vl), (veqv_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_veqv_vsvl i64:$sy, v256f64:$vz, i32:$vl), (veqv_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_veqv_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (veqv_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_veqv_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (veqv_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_veqv_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (veqv_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pveqvlo_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pveqvlo_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvlo_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pveqvlo_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pveqvlo_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pveqvlo_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pveqvlo_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pveqvup_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pveqvup_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvup_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pveqvup_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pveqvup_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pveqvup_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqvup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pveqvup_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqv_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pveqv_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pveqv_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pveqv_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqv_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pveqv_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pveqv_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pveqv_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqv_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pveqv_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pveqv_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pveqv_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vseq_vl i32:$vl), (vseq_vl i32:$vl)>; +def : Pat<(int_ve_vl_vseq_vvl v256f64:$vd, i32:$vl), (vseq_vvl v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvseqlo_vl i32:$vl), (pvseqlo_vl i32:$vl)>; +def : Pat<(int_ve_vl_pvseqlo_vvl v256f64:$vd, i32:$vl), (pvseqlo_vvl v256f64:$vd, i32:$vl)>; +def : 
Pat<(int_ve_vl_pvsequp_vl i32:$vl), (pvsequp_vl i32:$vl)>; +def : Pat<(int_ve_vl_pvsequp_vvl v256f64:$vd, i32:$vl), (pvsequp_vvl v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvseq_vl i32:$vl), (pvseq_vl i32:$vl)>; +def : Pat<(int_ve_vl_pvseq_vvl v256f64:$vd, i32:$vl), (pvseq_vvl v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (vsll_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (vsll_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvsl v256f64:$vz, i64:$sy, i32:$vl), (vsll_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (vsll_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvsl v256f64:$vz, (i64 uimm6:$N), i32:$vl), (vsll_vvIl v256f64:$vz, (i64 uimm6:$N), i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvsvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl), (vsll_vvIvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsll_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsll_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsll_vvsmvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl), (vsll_vvImvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslllo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvslllo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvslllo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvslllo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslllo_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvslllo_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvslllo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvslllo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslllo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvslllo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslllo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvslllo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsllup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsllup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsllup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsllup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsllup_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsllup_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsllup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsllup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsllup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsllup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsllup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsllup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsll_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsll_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsll_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsll_vvvvl v256f64:$vz, 
v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsll_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsll_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsll_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsll_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsll_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsll_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsll_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsll_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (vsrl_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (vsrl_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvsl v256f64:$vz, i64:$sy, i32:$vl), (vsrl_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (vsrl_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvsl v256f64:$vz, (i64 uimm6:$N), i32:$vl), (vsrl_vvIl v256f64:$vz, (i64 uimm6:$N), i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvsvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl), (vsrl_vvIvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsrl_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsrl_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsrl_vvsmvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl), (vsrl_vvImvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrllo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsrllo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrllo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsrllo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrllo_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsrllo_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrllo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsrllo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrllo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsrllo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrllo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsrllo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrlup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsrlup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrlup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsrlup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrlup_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsrlup_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrlup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsrlup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrlup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsrlup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrlup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, 
i32:$vl), (pvsrlup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrl_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsrl_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrl_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsrl_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrl_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsrl_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrl_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsrl_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrl_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsrl_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsrl_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsrl_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (vslaw_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (vslaw_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvsl v256f64:$vz, i64:$sy, i32:$vl), (vslaw_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (vslaw_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvsl v256f64:$vz, (i64 uimm6:$N), i32:$vl), (vslaw_vvIl v256f64:$vz, (i64 uimm6:$N), i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvsvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl), (vslaw_vvIvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vslaw_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vslaw_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslaw_vvsmvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl), (vslaw_vvImvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslalo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvslalo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvslalo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvslalo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslalo_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvslalo_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvslalo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvslalo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslalo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvslalo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslalo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvslalo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslaup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvslaup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvslaup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvslaup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslaup_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvslaup_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvslaup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, 
i32:$vl), (pvslaup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslaup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvslaup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvslaup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvslaup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsla_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsla_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsla_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsla_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsla_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsla_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsla_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsla_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsla_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsla_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsla_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsla_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (vslal_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (vslal_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvsl v256f64:$vz, i64:$sy, i32:$vl), (vslal_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (vslal_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvsl v256f64:$vz, (i64 uimm6:$N), i32:$vl), (vslal_vvIl v256f64:$vz, (i64 uimm6:$N), i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvsvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl), (vslal_vvIvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vslal_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vslal_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vslal_vvsmvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl), (vslal_vvImvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (vsraw_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (vsraw_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvsl v256f64:$vz, i64:$sy, i32:$vl), (vsraw_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (vsraw_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvsl v256f64:$vz, (i64 uimm6:$N), i32:$vl), (vsraw_vvIl v256f64:$vz, (i64 uimm6:$N), i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvsvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl), (vsraw_vvIvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsraw_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvsmvl 
v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsraw_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsraw_vvsmvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl), (vsraw_vvImvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsralo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsralo_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsralo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsralo_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsralo_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsralo_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsralo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsralo_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsralo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsralo_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsralo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsralo_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsraup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsraup_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsraup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsraup_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsraup_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsraup_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsraup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsraup_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsraup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsraup_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsraup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsraup_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsra_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (pvsra_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsra_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (pvsra_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsra_vvsl v256f64:$vz, i64:$sy, i32:$vl), (pvsra_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_pvsra_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (pvsra_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsra_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsra_vvvMvl v256f64:$vz, v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvsra_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvsra_vvsMvl v256f64:$vz, i64:$sy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvvl v256f64:$vz, v256f64:$vy, i32:$vl), (vsral_vvvl v256f64:$vz, v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl), (vsral_vvvvl v256f64:$vz, v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvsl v256f64:$vz, i64:$sy, i32:$vl), (vsral_vvsl v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (vsral_vvsvl v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvsl v256f64:$vz, (i64 uimm6:$N), i32:$vl), (vsral_vvIl v256f64:$vz, (i64 uimm6:$N), i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvsvl 
v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl), (vsral_vvIvl v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsral_vvvmvl v256f64:$vz, v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl), (vsral_vvsmvl v256f64:$vz, i64:$sy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsral_vvsmvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl), (vsral_vvImvl v256f64:$vz, (i64 uimm6:$N), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsfa_vvssl v256f64:$vz, i64:$sy, i64:$sz, i32:$vl), (vsfa_vvssl v256f64:$vz, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsfa_vvssvl v256f64:$vz, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vsfa_vvssvl v256f64:$vz, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsfa_vvssl v256f64:$vz, (i64 simm7:$I), i64:$sz, i32:$vl), (vsfa_vvIsl v256f64:$vz, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsfa_vvssvl v256f64:$vz, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vsfa_vvIsvl v256f64:$vz, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsfa_vvssmvl v256f64:$vz, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsfa_vvssmvl v256f64:$vz, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsfa_vvssmvl v256f64:$vz, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vsfa_vvIsmvl v256f64:$vz, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfaddd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfaddd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfaddd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfaddd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfaddd_vsvl f64:$sy, v256f64:$vz, i32:$vl), (vfaddd_vsvl f64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfaddd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfaddd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfaddd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfaddd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfaddd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfaddd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfadds_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfadds_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfadds_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfadds_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfadds_vsvl f32:$sy, v256f64:$vz, i32:$vl), (vfadds_vsvl f32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfadds_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfadds_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfadds_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfadds_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfadds_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfadds_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfadd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvfadd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfadd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfadd_vvvvl v256f64:$vy, v256f64:$vz, 
v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfadd_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvfadd_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfadd_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfadd_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfadd_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfadd_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfadd_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfadd_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfsubd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfsubd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubd_vsvl f64:$sy, v256f64:$vz, i32:$vl), (vfsubd_vsvl f64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfsubd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfsubd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfsubd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfsubs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfsubs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubs_vsvl f32:$sy, v256f64:$vz, i32:$vl), (vfsubs_vsvl f32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfsubs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubs_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfsubs_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsubs_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfsubs_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfsub_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvfsub_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfsub_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfsub_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfsub_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvfsub_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfsub_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfsub_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfsub_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfsub_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfsub_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfsub_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuld_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfmuld_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuld_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmuld_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuld_vsvl f64:$sy, v256f64:$vz, i32:$vl), (vfmuld_vsvl f64:$sy, v256f64:$vz, 
i32:$vl)>; +def : Pat<(int_ve_vl_vfmuld_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmuld_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuld_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmuld_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuld_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmuld_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuls_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfmuls_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuls_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmuls_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuls_vsvl f32:$sy, v256f64:$vz, i32:$vl), (vfmuls_vsvl f32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuls_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmuls_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuls_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmuls_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmuls_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmuls_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvfmul_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfmul_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmul_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvfmul_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfmul_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmul_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmul_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmul_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmul_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfdivd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfdivd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivd_vsvl f64:$sy, v256f64:$vz, i32:$vl), (vfdivd_vsvl f64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfdivd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfdivd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfdivd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfdivs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfdivs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivs_vsvl f32:$sy, v256f64:$vz, i32:$vl), (vfdivs_vsvl f32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfdivs_vsvvl f32:$sy, v256f64:$vz, 
v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivs_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfdivs_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfdivs_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfdivs_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsqrtd_vvl v256f64:$vy, i32:$vl), (vfsqrtd_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfsqrtd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfsqrtd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfsqrts_vvl v256f64:$vy, i32:$vl), (vfsqrts_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfsqrts_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfsqrts_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmpd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfcmpd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmpd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfcmpd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmpd_vsvl f64:$sy, v256f64:$vz, i32:$vl), (vfcmpd_vsvl f64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmpd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfcmpd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmpd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfcmpd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmpd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfcmpd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmps_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfcmps_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmps_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfcmps_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmps_vsvl f32:$sy, v256f64:$vz, i32:$vl), (vfcmps_vsvl f32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmps_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfcmps_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmps_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfcmps_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfcmps_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfcmps_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfcmp_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvfcmp_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfcmp_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfcmp_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfcmp_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvfcmp_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfcmp_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfcmp_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfcmp_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfcmp_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfcmp_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfcmp_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfmaxd_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, 
i32:$vl), (vfmaxd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxd_vsvl f64:$sy, v256f64:$vz, i32:$vl), (vfmaxd_vsvl f64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmaxd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmaxd_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmaxd_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfmaxs_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmaxs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxs_vsvl f32:$sy, v256f64:$vz, i32:$vl), (vfmaxs_vsvl f32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmaxs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxs_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmaxs_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmaxs_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmaxs_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmax_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvfmax_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmax_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfmax_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmax_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvfmax_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmax_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfmax_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmax_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmax_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmax_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmax_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmind_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfmind_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmind_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmind_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmind_vsvl f64:$sy, v256f64:$vz, i32:$vl), (vfmind_vsvl f64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmind_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmind_vsvvl f64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmind_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmind_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmind_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmind_vsvmvl f64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmins_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (vfmins_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmins_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmins_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmins_vsvl f32:$sy, v256f64:$vz, 
i32:$vl), (vfmins_vsvl f32:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmins_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (vfmins_vsvvl f32:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmins_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmins_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmins_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmins_vsvmvl f32:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmin_vvvl v256f64:$vy, v256f64:$vz, i32:$vl), (pvfmin_vvvl v256f64:$vy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmin_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfmin_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmin_vsvl i64:$sy, v256f64:$vz, i32:$vl), (pvfmin_vsvl i64:$sy, v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmin_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl), (pvfmin_vsvvl i64:$sy, v256f64:$vz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmin_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmin_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmin_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmin_vsvMvl i64:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmadd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmadd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmadd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmadd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl), (vfmadd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmadd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmadd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmadd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmadd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmadd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmads_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmads_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmads_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmads_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : 
Pat<(int_ve_vl_vfmads_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl), (vfmads_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmads_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmads_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmads_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmads_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmads_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfmad_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfmad_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfmad_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vsvvvl i64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfmad_vsvvvl i64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl), (pvfmad_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfmad_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmad_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmad_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmad_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmad_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmsbd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmsbd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmsbd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmsbd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl), (vfmsbd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmsbd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmsbd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def 
: Pat<(int_ve_vl_vfmsbd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmsbd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmsbd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmsbs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmsbs_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfmsbs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmsbs_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl), (vfmsbs_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfmsbs_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmsbs_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmsbs_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmsbs_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfmsbs_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfmsb_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfmsb_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfmsb_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vsvvvl i64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfmsb_vsvvvl i64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl), (pvfmsb_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfmsb_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmsb_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmsb_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmsb_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfmsb_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmadd_vvvvl v256f64:$vy, v256f64:$vz, 
v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmadd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmadd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmadd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl), (vfnmadd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmadd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmadd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmadd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmadd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmadd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmads_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmads_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmads_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmads_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl), (vfnmads_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmads_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmads_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmads_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmads_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmads_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfnmad_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfnmad_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfnmad_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vsvvvl i64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfnmad_vsvvvl i64:$sy, 
v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl), (pvfnmad_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfnmad_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfnmad_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfnmad_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmad_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfnmad_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmsbd_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmsbd_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmsbd_vsvvl f64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmsbd_vsvvvl f64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl), (vfnmsbd_vvsvl v256f64:$vy, f64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmsbd_vvsvvl v256f64:$vy, f64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmsbd_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmsbd_vsvvmvl f64:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmsbd_vvsvmvl v256f64:$vy, f64:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmsbs_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmsbs_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (vfnmsbs_vsvvl f32:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmsbs_vsvvvl f32:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl), (vfnmsbs_vvsvl v256f64:$vy, f32:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (vfnmsbs_vvsvvl v256f64:$vy, f32:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, 
i32:$vl), (vfnmsbs_vvvvmvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmsbs_vsvvmvl f32:$sy, v256f64:$vz, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfnmsbs_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl), (vfnmsbs_vvsvmvl v256f64:$vy, f32:$sy, v256f64:$vw, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfnmsb_vvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfnmsb_vvvvvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl), (pvfnmsb_vsvvl i64:$sy, v256f64:$vz, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vsvvvl i64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfnmsb_vsvvvl i64:$sy, v256f64:$vz, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl), (pvfnmsb_vvsvl v256f64:$vy, i64:$sy, v256f64:$vw, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl), (pvfnmsb_vvsvvl v256f64:$vy, i64:$sy, v256f64:$vw, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfnmsb_vvvvMvl v256f64:$vy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfnmsb_vsvvMvl i64:$sy, v256f64:$vz, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvfnmsb_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl), (pvfnmsb_vvsvMvl v256f64:$vy, i64:$sy, v256f64:$vw, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrcpd_vvl v256f64:$vy, i32:$vl), (vrcpd_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrcpd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrcpd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrcps_vvl v256f64:$vy, i32:$vl), (vrcps_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrcps_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrcps_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvrcp_vvl v256f64:$vy, i32:$vl), (pvrcp_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvrcp_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (pvrcp_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrsqrtd_vvl v256f64:$vy, i32:$vl), (vrsqrtd_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrsqrtd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrsqrtd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrsqrts_vvl v256f64:$vy, i32:$vl), (vrsqrts_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrsqrts_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrsqrts_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvrsqrt_vvl v256f64:$vy, i32:$vl), (pvrsqrt_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvrsqrt_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (pvrsqrt_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrsqrtdnex_vvl v256f64:$vy, i32:$vl), (vrsqrtdnex_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrsqrtdnex_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrsqrtdnex_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : 
Pat<(int_ve_vl_vrsqrtsnex_vvl v256f64:$vy, i32:$vl), (vrsqrtsnex_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrsqrtsnex_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrsqrtsnex_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvrsqrtnex_vvl v256f64:$vy, i32:$vl), (pvrsqrtnex_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvrsqrtnex_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (pvrsqrtnex_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdsx_vvl v256f64:$vy, i32:$vl), (vcvtwdsx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwdsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdsx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwdsx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdsxrz_vvl v256f64:$vy, i32:$vl), (vcvtwdsxrz_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdsxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwdsxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdsxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwdsxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdzx_vvl v256f64:$vy, i32:$vl), (vcvtwdzx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwdzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdzx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwdzx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdzxrz_vvl v256f64:$vy, i32:$vl), (vcvtwdzxrz_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdzxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwdzxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwdzxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwdzxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwssx_vvl v256f64:$vy, i32:$vl), (vcvtwssx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwssx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwssx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwssx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwssx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwssxrz_vvl v256f64:$vy, i32:$vl), (vcvtwssxrz_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwssxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwssxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwssxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwssxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwszx_vvl v256f64:$vy, i32:$vl), (vcvtwszx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwszx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwszx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwszx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwszx_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwszxrz_vvl v256f64:$vy, i32:$vl), (vcvtwszxrz_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwszxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtwszxrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtwszxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtwszxrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcvtws_vvl v256f64:$vy, i32:$vl), (pvcvtws_vvl v256f64:$vy, i32:$vl)>; +def : 
Pat<(int_ve_vl_pvcvtws_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (pvcvtws_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcvtws_vvMvl v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvcvtws_vvMvl v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcvtwsrz_vvl v256f64:$vy, i32:$vl), (pvcvtwsrz_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvcvtwsrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (pvcvtwsrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcvtwsrz_vvMvl v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl), (pvcvtwsrz_vvMvl v256f64:$vy, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtld_vvl v256f64:$vy, i32:$vl), (vcvtld_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtld_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtld_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtld_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtld_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtldrz_vvl v256f64:$vy, i32:$vl), (vcvtldrz_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtldrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtldrz_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtldrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl), (vcvtldrz_vvmvl v256f64:$vy, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtdw_vvl v256f64:$vy, i32:$vl), (vcvtdw_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtdw_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtdw_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtsw_vvl v256f64:$vy, i32:$vl), (vcvtsw_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtsw_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtsw_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_pvcvtsw_vvl v256f64:$vy, i32:$vl), (pvcvtsw_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_pvcvtsw_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (pvcvtsw_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtdl_vvl v256f64:$vy, i32:$vl), (vcvtdl_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtdl_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtdl_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtds_vvl v256f64:$vy, i32:$vl), (vcvtds_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtds_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtds_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtsd_vvl v256f64:$vy, i32:$vl), (vcvtsd_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vcvtsd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vcvtsd_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmrg_vvvml v256f64:$vy, v256f64:$vz, v4i64:$vm, i32:$vl), (vmrg_vvvml v256f64:$vy, v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vmrg_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmrg_vvvmvl v256f64:$vy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmrg_vsvml i64:$sy, v256f64:$vz, v4i64:$vm, i32:$vl), (vmrg_vsvml i64:$sy, v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vmrg_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmrg_vsvmvl i64:$sy, v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmrg_vsvml (i64 simm7:$I), v256f64:$vz, v4i64:$vm, i32:$vl), (vmrg_vIvml (i64 simm7:$I), v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vmrg_vsvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vmrg_vIvmvl (i64 simm7:$I), v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; 
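[Editorial aside, not part of the patch] Each Pat<> record in this generated file is a one-to-one selection rule: the llvm.ve.vl.* intrinsic emitted for the corresponding __builtin_ve_vl_* builtin is matched and rewritten to the VE machine-instruction record of the same name, typically with the operands passed straight through. A minimal usage sketch in C, assuming a 2048-byte vector type for VE vector registers (the __vr typedef below is an assumption for illustration, not something this patch defines):

    /* Illustrative sketch only; __vr is assumed to be the usual
       256 x double (2048-byte) VE vector register type. */
    typedef double __vr __attribute__((__vector_size__(2048)));

    /* Element-wise double-precision add of vy and vz over vl elements.
       The builtin lowers to the int_ve_vl_vfaddd_vvvl intrinsic, which
       the vfaddd_vvvl pattern earlier in this file selects into the
       matching VE vector instruction. */
    static __vr add_d(__vr vy, __vr vz, unsigned int vl) {
      return __builtin_ve_vl_vfaddd_vvvl(vy, vz, vl);
    }
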
+def : Pat<(int_ve_vl_vmrgw_vvvMl v256f64:$vy, v256f64:$vz, v8i64:$vm, i32:$vl), (vmrgw_vvvMl v256f64:$vy, v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vmrgw_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (vmrgw_vvvMvl v256f64:$vy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vmrgw_vsvMl i32:$sy, v256f64:$vz, v8i64:$vm, i32:$vl), (vmrgw_vsvMl i32:$sy, v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vmrgw_vsvMvl i32:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl), (vmrgw_vsvMvl i32:$sy, v256f64:$vz, v8i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vshf_vvvsl v256f64:$vy, v256f64:$vz, i64:$sy, i32:$vl), (vshf_vvvsl v256f64:$vy, v256f64:$vz, i64:$sy, i32:$vl)>; +def : Pat<(int_ve_vl_vshf_vvvsvl v256f64:$vy, v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl), (vshf_vvvsvl v256f64:$vy, v256f64:$vz, i64:$sy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vshf_vvvsl v256f64:$vy, v256f64:$vz, (i64 uimm6:$N), i32:$vl), (vshf_vvvIl v256f64:$vy, v256f64:$vz, (i64 uimm6:$N), i32:$vl)>; +def : Pat<(int_ve_vl_vshf_vvvsvl v256f64:$vy, v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl), (vshf_vvvIvl v256f64:$vy, v256f64:$vz, (i64 uimm6:$N), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vcp_vvmvl v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vcp_vvmvl v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vex_vvmvl v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl), (vex_vvmvl v256f64:$vz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklat_ml i32:$vl), (vfmklat_ml i32:$vl)>; +def : Pat<(int_ve_vl_vfmklaf_ml i32:$vl), (vfmklaf_ml i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloat_ml i32:$vl), (pvfmkwloat_ml i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupat_ml i32:$vl), (pvfmkwupat_ml i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloaf_ml i32:$vl), (pvfmkwloaf_ml i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupaf_ml i32:$vl), (pvfmkwupaf_ml i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkat_Ml i32:$vl), (pvfmkat_Ml i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkaf_Ml i32:$vl), (pvfmkaf_Ml i32:$vl)>; +def : Pat<(int_ve_vl_vfmklgt_mvl v256f64:$vz, i32:$vl), (vfmklgt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkllt_mvl v256f64:$vz, i32:$vl), (vfmkllt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkllt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkllt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklne_mvl v256f64:$vz, i32:$vl), (vfmklne_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklne_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklne_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkleq_mvl v256f64:$vz, i32:$vl), (vfmkleq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkleq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkleq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklge_mvl v256f64:$vz, i32:$vl), (vfmklge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklle_mvl v256f64:$vz, i32:$vl), (vfmklle_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklle_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklle_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklnum_mvl v256f64:$vz, i32:$vl), (vfmklnum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; 
+def : Pat<(int_ve_vl_vfmklnan_mvl v256f64:$vz, i32:$vl), (vfmklnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklgtnan_mvl v256f64:$vz, i32:$vl), (vfmklgtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklltnan_mvl v256f64:$vz, i32:$vl), (vfmklltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklnenan_mvl v256f64:$vz, i32:$vl), (vfmklnenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkleqnan_mvl v256f64:$vz, i32:$vl), (vfmkleqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkleqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkleqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklgenan_mvl v256f64:$vz, i32:$vl), (vfmklgenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmklgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmklgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkllenan_mvl v256f64:$vz, i32:$vl), (vfmkllenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkllenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkllenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwgt_mvl v256f64:$vz, i32:$vl), (vfmkwgt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwlt_mvl v256f64:$vz, i32:$vl), (vfmkwlt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwlt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwlt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwne_mvl v256f64:$vz, i32:$vl), (vfmkwne_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwne_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwne_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkweq_mvl v256f64:$vz, i32:$vl), (vfmkweq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkweq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkweq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwge_mvl v256f64:$vz, i32:$vl), (vfmkwge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwle_mvl v256f64:$vz, i32:$vl), (vfmkwle_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwle_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwle_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwnum_mvl v256f64:$vz, i32:$vl), (vfmkwnum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwnan_mvl v256f64:$vz, i32:$vl), (vfmkwnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwgtnan_mvl v256f64:$vz, i32:$vl), (vfmkwgtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwltnan_mvl v256f64:$vz, i32:$vl), 
(vfmkwltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwnenan_mvl v256f64:$vz, i32:$vl), (vfmkwnenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkweqnan_mvl v256f64:$vz, i32:$vl), (vfmkweqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkweqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkweqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwgenan_mvl v256f64:$vz, i32:$vl), (vfmkwgenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwlenan_mvl v256f64:$vz, i32:$vl), (vfmkwlenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkwlenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkwlenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlogt_mvl v256f64:$vz, i32:$vl), (pvfmkwlogt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupgt_mvl v256f64:$vz, i32:$vl), (pvfmkwupgt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlogt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlogt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlolt_mvl v256f64:$vz, i32:$vl), (pvfmkwlolt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwuplt_mvl v256f64:$vz, i32:$vl), (pvfmkwuplt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlolt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlolt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwuplt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwuplt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlone_mvl v256f64:$vz, i32:$vl), (pvfmkwlone_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupne_mvl v256f64:$vz, i32:$vl), (pvfmkwupne_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlone_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlone_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupne_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupne_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloeq_mvl v256f64:$vz, i32:$vl), (pvfmkwloeq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupeq_mvl v256f64:$vz, i32:$vl), (pvfmkwupeq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwloeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloge_mvl v256f64:$vz, i32:$vl), (pvfmkwloge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupge_mvl v256f64:$vz, i32:$vl), (pvfmkwupge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwloge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlole_mvl v256f64:$vz, i32:$vl), (pvfmkwlole_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwuple_mvl v256f64:$vz, i32:$vl), (pvfmkwuple_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlole_mvml v256f64:$vz, 
v4i64:$vm, i32:$vl), (pvfmkwlole_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwuple_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwuple_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlonum_mvl v256f64:$vz, i32:$vl), (pvfmkwlonum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupnum_mvl v256f64:$vz, i32:$vl), (pvfmkwupnum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlonum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlonum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlonan_mvl v256f64:$vz, i32:$vl), (pvfmkwlonan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupnan_mvl v256f64:$vz, i32:$vl), (pvfmkwupnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlonan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlonan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlogtnan_mvl v256f64:$vz, i32:$vl), (pvfmkwlogtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupgtnan_mvl v256f64:$vz, i32:$vl), (pvfmkwupgtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlogtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlogtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloltnan_mvl v256f64:$vz, i32:$vl), (pvfmkwloltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupltnan_mvl v256f64:$vz, i32:$vl), (pvfmkwupltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwloltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlonenan_mvl v256f64:$vz, i32:$vl), (pvfmkwlonenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupnenan_mvl v256f64:$vz, i32:$vl), (pvfmkwupnenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlonenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlonenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloeqnan_mvl v256f64:$vz, i32:$vl), (pvfmkwloeqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupeqnan_mvl v256f64:$vz, i32:$vl), (pvfmkwupeqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwloeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwloeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlogenan_mvl v256f64:$vz, i32:$vl), (pvfmkwlogenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupgenan_mvl v256f64:$vz, i32:$vl), (pvfmkwupgenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlogenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlogenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwupgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwupgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlolenan_mvl v256f64:$vz, i32:$vl), (pvfmkwlolenan_mvl 
v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwuplenan_mvl v256f64:$vz, i32:$vl), (pvfmkwuplenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlolenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwlolenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwuplenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkwuplenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwgt_Mvl v256f64:$vz, i32:$vl), (pvfmkwgt_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwgt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwgt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlt_Mvl v256f64:$vz, i32:$vl), (pvfmkwlt_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwlt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwne_Mvl v256f64:$vz, i32:$vl), (pvfmkwne_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwne_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwne_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkweq_Mvl v256f64:$vz, i32:$vl), (pvfmkweq_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkweq_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkweq_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwge_Mvl v256f64:$vz, i32:$vl), (pvfmkwge_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwge_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwge_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwle_Mvl v256f64:$vz, i32:$vl), (pvfmkwle_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwle_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwle_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwnum_Mvl v256f64:$vz, i32:$vl), (pvfmkwnum_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwnum_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwnum_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwnan_Mvl v256f64:$vz, i32:$vl), (pvfmkwnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwgtnan_Mvl v256f64:$vz, i32:$vl), (pvfmkwgtnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwgtnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwgtnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwltnan_Mvl v256f64:$vz, i32:$vl), (pvfmkwltnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwltnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwltnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwnenan_Mvl v256f64:$vz, i32:$vl), (pvfmkwnenan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwnenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwnenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkweqnan_Mvl v256f64:$vz, i32:$vl), (pvfmkweqnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkweqnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkweqnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwgenan_Mvl v256f64:$vz, i32:$vl), (pvfmkwgenan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwgenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwgenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlenan_Mvl v256f64:$vz, i32:$vl), (pvfmkwlenan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkwlenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkwlenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdgt_mvl v256f64:$vz, i32:$vl), 
(vfmkdgt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdlt_mvl v256f64:$vz, i32:$vl), (vfmkdlt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdlt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdlt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdne_mvl v256f64:$vz, i32:$vl), (vfmkdne_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdne_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdne_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdeq_mvl v256f64:$vz, i32:$vl), (vfmkdeq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdge_mvl v256f64:$vz, i32:$vl), (vfmkdge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdle_mvl v256f64:$vz, i32:$vl), (vfmkdle_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdle_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdle_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdnum_mvl v256f64:$vz, i32:$vl), (vfmkdnum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdnan_mvl v256f64:$vz, i32:$vl), (vfmkdnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdgtnan_mvl v256f64:$vz, i32:$vl), (vfmkdgtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdltnan_mvl v256f64:$vz, i32:$vl), (vfmkdltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdnenan_mvl v256f64:$vz, i32:$vl), (vfmkdnenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdeqnan_mvl v256f64:$vz, i32:$vl), (vfmkdeqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdgenan_mvl v256f64:$vz, i32:$vl), (vfmkdgenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdlenan_mvl v256f64:$vz, i32:$vl), (vfmkdlenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkdlenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkdlenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksgt_mvl v256f64:$vz, i32:$vl), (vfmksgt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkslt_mvl v256f64:$vz, i32:$vl), (vfmkslt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkslt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkslt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksne_mvl v256f64:$vz, i32:$vl), (vfmksne_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksne_mvml 
v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksne_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkseq_mvl v256f64:$vz, i32:$vl), (vfmkseq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkseq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkseq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksge_mvl v256f64:$vz, i32:$vl), (vfmksge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksle_mvl v256f64:$vz, i32:$vl), (vfmksle_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksle_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksle_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksnum_mvl v256f64:$vz, i32:$vl), (vfmksnum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksnan_mvl v256f64:$vz, i32:$vl), (vfmksnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksgtnan_mvl v256f64:$vz, i32:$vl), (vfmksgtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksltnan_mvl v256f64:$vz, i32:$vl), (vfmksltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksnenan_mvl v256f64:$vz, i32:$vl), (vfmksnenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkseqnan_mvl v256f64:$vz, i32:$vl), (vfmkseqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkseqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkseqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksgenan_mvl v256f64:$vz, i32:$vl), (vfmksgenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmksgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmksgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkslenan_mvl v256f64:$vz, i32:$vl), (vfmkslenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_vfmkslenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (vfmkslenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslogt_mvl v256f64:$vz, i32:$vl), (pvfmkslogt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupgt_mvl v256f64:$vz, i32:$vl), (pvfmksupgt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslogt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslogt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupgt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslolt_mvl v256f64:$vz, i32:$vl), (pvfmkslolt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksuplt_mvl v256f64:$vz, i32:$vl), (pvfmksuplt_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslolt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslolt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksuplt_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksuplt_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslone_mvl v256f64:$vz, i32:$vl), (pvfmkslone_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupne_mvl v256f64:$vz, i32:$vl), 
(pvfmksupne_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslone_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslone_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupne_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupne_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloeq_mvl v256f64:$vz, i32:$vl), (pvfmksloeq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupeq_mvl v256f64:$vz, i32:$vl), (pvfmksupeq_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksloeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupeq_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloge_mvl v256f64:$vz, i32:$vl), (pvfmksloge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupge_mvl v256f64:$vz, i32:$vl), (pvfmksupge_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksloge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupge_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupge_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslole_mvl v256f64:$vz, i32:$vl), (pvfmkslole_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksuple_mvl v256f64:$vz, i32:$vl), (pvfmksuple_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslole_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslole_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksuple_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksuple_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslonum_mvl v256f64:$vz, i32:$vl), (pvfmkslonum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupnum_mvl v256f64:$vz, i32:$vl), (pvfmksupnum_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslonum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslonum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupnum_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslonan_mvl v256f64:$vz, i32:$vl), (pvfmkslonan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupnan_mvl v256f64:$vz, i32:$vl), (pvfmksupnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslonan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslonan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslogtnan_mvl v256f64:$vz, i32:$vl), (pvfmkslogtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupgtnan_mvl v256f64:$vz, i32:$vl), (pvfmksupgtnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslogtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslogtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupgtnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloltnan_mvl v256f64:$vz, i32:$vl), (pvfmksloltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupltnan_mvl v256f64:$vz, i32:$vl), (pvfmksupltnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksloltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupltnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslonenan_mvl v256f64:$vz, i32:$vl), 
(pvfmkslonenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupnenan_mvl v256f64:$vz, i32:$vl), (pvfmksupnenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslonenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslonenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupnenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloeqnan_mvl v256f64:$vz, i32:$vl), (pvfmksloeqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupeqnan_mvl v256f64:$vz, i32:$vl), (pvfmksupeqnan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksloeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksloeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupeqnan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslogenan_mvl v256f64:$vz, i32:$vl), (pvfmkslogenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupgenan_mvl v256f64:$vz, i32:$vl), (pvfmksupgenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslogenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslogenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksupgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksupgenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslolenan_mvl v256f64:$vz, i32:$vl), (pvfmkslolenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksuplenan_mvl v256f64:$vz, i32:$vl), (pvfmksuplenan_mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslolenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmkslolenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksuplenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl), (pvfmksuplenan_mvml v256f64:$vz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksgt_Mvl v256f64:$vz, i32:$vl), (pvfmksgt_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksgt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksgt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslt_Mvl v256f64:$vz, i32:$vl), (pvfmkslt_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkslt_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksne_Mvl v256f64:$vz, i32:$vl), (pvfmksne_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksne_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksne_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkseq_Mvl v256f64:$vz, i32:$vl), (pvfmkseq_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkseq_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkseq_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksge_Mvl v256f64:$vz, i32:$vl), (pvfmksge_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksge_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksge_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksle_Mvl v256f64:$vz, i32:$vl), (pvfmksle_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksle_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksle_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksnum_Mvl v256f64:$vz, i32:$vl), (pvfmksnum_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksnum_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksnum_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksnan_Mvl v256f64:$vz, i32:$vl), (pvfmksnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksnan_MvMl v256f64:$vz, v8i64:$vm, 
i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksgtnan_Mvl v256f64:$vz, i32:$vl), (pvfmksgtnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksgtnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksgtnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksltnan_Mvl v256f64:$vz, i32:$vl), (pvfmksltnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksltnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksltnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksnenan_Mvl v256f64:$vz, i32:$vl), (pvfmksnenan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksnenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksnenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkseqnan_Mvl v256f64:$vz, i32:$vl), (pvfmkseqnan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkseqnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkseqnan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksgenan_Mvl v256f64:$vz, i32:$vl), (pvfmksgenan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmksgenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmksgenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslenan_Mvl v256f64:$vz, i32:$vl), (pvfmkslenan_Mvl v256f64:$vz, i32:$vl)>; +def : Pat<(int_ve_vl_pvfmkslenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl), (pvfmkslenan_MvMl v256f64:$vz, v8i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsumwsx_vvl v256f64:$vy, i32:$vl), (vsumwsx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vsumwsx_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vsumwsx_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsumwzx_vvl v256f64:$vy, i32:$vl), (vsumwzx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vsumwzx_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vsumwzx_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsuml_vvl v256f64:$vy, i32:$vl), (vsuml_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vsuml_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vsuml_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfsumd_vvl v256f64:$vy, i32:$vl), (vfsumd_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfsumd_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vfsumd_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vfsums_vvl v256f64:$vy, i32:$vl), (vfsums_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfsums_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vfsums_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswfstsx_vvl v256f64:$vy, i32:$vl), (vrmaxswfstsx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswfstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrmaxswfstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswlstsx_vvl v256f64:$vy, i32:$vl), (vrmaxswlstsx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswlstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrmaxswlstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswfstzx_vvl v256f64:$vy, i32:$vl), (vrmaxswfstzx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswfstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrmaxswfstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswlstzx_vvl v256f64:$vy, i32:$vl), (vrmaxswlstzx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxswlstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrmaxswlstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswfstsx_vvl v256f64:$vy, i32:$vl), (vrminswfstsx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswfstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), 
(vrminswfstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswlstsx_vvl v256f64:$vy, i32:$vl), (vrminswlstsx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswlstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrminswlstsx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswfstzx_vvl v256f64:$vy, i32:$vl), (vrminswfstzx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswfstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrminswfstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswlstzx_vvl v256f64:$vy, i32:$vl), (vrminswlstzx_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrminswlstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrminswlstzx_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxslfst_vvl v256f64:$vy, i32:$vl), (vrmaxslfst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxslfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrmaxslfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxsllst_vvl v256f64:$vy, i32:$vl), (vrmaxsllst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrmaxsllst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrmaxsllst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrminslfst_vvl v256f64:$vy, i32:$vl), (vrminslfst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrminslfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrminslfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrminsllst_vvl v256f64:$vy, i32:$vl), (vrminsllst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrminsllst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vrminsllst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxdfst_vvl v256f64:$vy, i32:$vl), (vfrmaxdfst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxdfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrmaxdfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxdlst_vvl v256f64:$vy, i32:$vl), (vfrmaxdlst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxdlst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrmaxdlst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxsfst_vvl v256f64:$vy, i32:$vl), (vfrmaxsfst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxsfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrmaxsfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxslst_vvl v256f64:$vy, i32:$vl), (vfrmaxslst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmaxslst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrmaxslst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmindfst_vvl v256f64:$vy, i32:$vl), (vfrmindfst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmindfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrmindfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmindlst_vvl v256f64:$vy, i32:$vl), (vfrmindlst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrmindlst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrmindlst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrminsfst_vvl v256f64:$vy, i32:$vl), (vfrminsfst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrminsfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrminsfst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vfrminslst_vvl v256f64:$vy, i32:$vl), (vfrminslst_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vfrminslst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl), (vfrminslst_vvvl v256f64:$vy, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vrand_vvl v256f64:$vy, i32:$vl), 
(vrand_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrand_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vrand_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vror_vvl v256f64:$vy, i32:$vl), (vror_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vror_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vror_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vrxor_vvl v256f64:$vy, i32:$vl), (vrxor_vvl v256f64:$vy, i32:$vl)>; +def : Pat<(int_ve_vl_vrxor_vvml v256f64:$vy, v4i64:$vm, i32:$vl), (vrxor_vvml v256f64:$vy, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vgt_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgt_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vgt_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgt_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgt_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgt_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgt_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgt_vvIZvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgt_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgt_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgt_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgt_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgt_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgt_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgt_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgt_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgt_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vgtnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgtnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, i64:$sy, 
(i64 simm7:$Z), i32:$vl), (vgtnc_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtnc_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgtnc_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgtnc_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgtnc_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtnc_vvIZvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgtnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtnc_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtnc_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgtnc_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtnc_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtnc_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtnc_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtnc_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vgtu_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgtu_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vgtu_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtu_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgtu_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgtu_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgtu_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtu_vvIZvl 
v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgtu_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtu_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtu_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtu_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgtu_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtu_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtu_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtu_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtu_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vgtunc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgtunc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vgtunc_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtunc_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgtunc_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgtunc_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgtunc_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtunc_vvIZvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgtunc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtunc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtunc_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtunc_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, (i64 
simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgtunc_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtunc_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtunc_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtunc_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtunc_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vgtlsx_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgtlsx_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vgtlsx_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlsx_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgtlsx_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgtlsx_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgtlsx_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlsx_vvIZvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgtlsx_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsx_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlsx_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsx_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgtlsx_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsx_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlsx_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsx_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsx_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), 
(vgtlsxnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgtlsxnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vgtlsxnc_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlsxnc_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgtlsxnc_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgtlsxnc_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgtlsxnc_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlsxnc_vvIZvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgtlsxnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsxnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlsxnc_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsxnc_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgtlsxnc_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsxnc_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlsxnc_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlsxnc_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlsxnc_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vgtlzx_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgtlzx_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vgtlzx_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlzx_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgtlzx_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, (i64 
simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgtlzx_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgtlzx_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlzx_vvIZvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgtlzx_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzx_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlzx_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzx_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgtlzx_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzx_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlzx_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzx_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzx_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vgtlzxnc_vvssl v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl), (vgtlzxnc_vvssvl v256f64:$vy, i64:$sy, i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vgtlzxnc_vvsZl v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlzxnc_vvsZvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vgtlzxnc_vvIsl v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl), (vgtlzxnc_vvIsvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vgtlzxnc_vvIZl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl), (vgtlzxnc_vvIZvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vgtlzxnc_vvssml v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzxnc_vvssmvl v256f64:$vy, 
i64:$sy, i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlzxnc_vvsZml v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzxnc_vvsZmvl v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vgtlzxnc_vvIsml v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzxnc_vvIsmvl v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vgtlzxnc_vvIZml v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vgtlzxnc_vvssmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl), (vgtlzxnc_vvIZmvl v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, v256f64:$vd, i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vsc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vsc_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vsc_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vsc_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vsc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsc_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vsc_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsc_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vscnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscnc_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscnc_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscnc_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, 
i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscnc_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscnc_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscnc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscnc_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vscot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscot_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscot_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscot_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscot_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscot_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscot_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vscncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscncot_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscncot_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscncot_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscncot_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscncot_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscncot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscncot_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, 
i64:$sz, i32:$vl), (vscu_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscu_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscu_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscu_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscu_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscu_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscu_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscu_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscu_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vscunc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscunc_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscunc_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscunc_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscunc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscunc_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscunc_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscunc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscunc_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vscuot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscuot_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscuot_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscuot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscuot_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : 
Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscuot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscuot_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscuot_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscuot_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vscuncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscuncot_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscuncot_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscuncot_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscuncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscuncot_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscuncot_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscuncot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscuncot_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vscl_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vscl_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vscl_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vscl_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vscl_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscl_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vscl_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vscl_vvssml 
v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vscl_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vsclnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vsclnc_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vsclnc_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vsclnc_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vsclnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsclnc_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vsclnc_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclnc_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsclnc_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vsclot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vsclot_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), (vsclot_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vsclot_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vsclot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsclot_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vsclot_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsclot_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl), (vsclncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl), (vsclncot_vvsZl v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl), 
(vsclncot_vvIsl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl), (vsclncot_vvIZl v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl), (vsclncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsclncot_vvsZml v256f64:$vx, v256f64:$vy, i64:$sy, (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl), (vsclncot_vvIsml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), i64:$sz, v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_vsclncot_vvssml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl), (vsclncot_vvIZml v256f64:$vx, v256f64:$vy, (i64 simm7:$I), (i64 simm7:$Z), v4i64:$vm, i32:$vl)>; +def : Pat<(int_ve_vl_andm_mmm v4i64:$vmy, v4i64:$vmz), (andm_mmm v4i64:$vmy, v4i64:$vmz)>; +def : Pat<(int_ve_vl_andm_MMM v8i64:$vmy, v8i64:$vmz), (andm_MMM v8i64:$vmy, v8i64:$vmz)>; +def : Pat<(int_ve_vl_orm_mmm v4i64:$vmy, v4i64:$vmz), (orm_mmm v4i64:$vmy, v4i64:$vmz)>; +def : Pat<(int_ve_vl_orm_MMM v8i64:$vmy, v8i64:$vmz), (orm_MMM v8i64:$vmy, v8i64:$vmz)>; +def : Pat<(int_ve_vl_xorm_mmm v4i64:$vmy, v4i64:$vmz), (xorm_mmm v4i64:$vmy, v4i64:$vmz)>; +def : Pat<(int_ve_vl_xorm_MMM v8i64:$vmy, v8i64:$vmz), (xorm_MMM v8i64:$vmy, v8i64:$vmz)>; +def : Pat<(int_ve_vl_eqvm_mmm v4i64:$vmy, v4i64:$vmz), (eqvm_mmm v4i64:$vmy, v4i64:$vmz)>; +def : Pat<(int_ve_vl_eqvm_MMM v8i64:$vmy, v8i64:$vmz), (eqvm_MMM v8i64:$vmy, v8i64:$vmz)>; +def : Pat<(int_ve_vl_nndm_mmm v4i64:$vmy, v4i64:$vmz), (nndm_mmm v4i64:$vmy, v4i64:$vmz)>; +def : Pat<(int_ve_vl_nndm_MMM v8i64:$vmy, v8i64:$vmz), (nndm_MMM v8i64:$vmy, v8i64:$vmz)>; +def : Pat<(int_ve_vl_negm_mm v4i64:$vmy), (negm_mm v4i64:$vmy)>; +def : Pat<(int_ve_vl_negm_MM v8i64:$vmy), (negm_MM v8i64:$vmy)>; +def : Pat<(int_ve_vl_pcvm_sml v4i64:$vmy, i32:$vl), (pcvm_sml v4i64:$vmy, i32:$vl)>; +def : Pat<(int_ve_vl_lzvm_sml v4i64:$vmy, i32:$vl), (lzvm_sml v4i64:$vmy, i32:$vl)>; +def : Pat<(int_ve_vl_tovm_sml v4i64:$vmy, i32:$vl), (tovm_sml v4i64:$vmy, i32:$vl)>; diff --git a/llvm/lib/Target/VE/VEInstrIntrinsicVL.td b/llvm/lib/Target/VE/VEInstrIntrinsicVL.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEInstrIntrinsicVL.td @@ -0,0 +1,29 @@ +// Pattern Matchings for VEL Intrinsics + +def : Pat<(int_ve_vl_svob), (SVOB)>; +def : Pat<(i64 (int_ve_vl_pack_f32p ADDRri:$addr0, ADDRri:$addr1)), + (ORrr (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + (LDUri MEMri:$addr0), sub_f32), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + (LDLUri MEMri:$addr1), sub_i32))>; + +def : Pat<(i64 (int_ve_vl_pack_f32a ADDRri:$addr)), + (i64 (MPYrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLUri MEMri:$addr), sub_i32), + (LEASLrzi (ANDrm0 (LEAzzi (LO32 (i64 0x0000000100000001))), 32), + (HI32 (i64 0x0000000100000001)))))>; + +def : Pat<(v4i64 (int_ve_vl_extract_vm512u v8i64:$vm)), + (v4i64 (EXTRACT_SUBREG v8i64:$vm, sub_vm_even))>; + +def : Pat<(v4i64 (int_ve_vl_extract_vm512l v8i64:$vm)), + (v4i64 (EXTRACT_SUBREG v8i64:$vm, sub_vm_odd))>; + +def : Pat<(v8i64 (int_ve_vl_insert_vm512u v8i64:$vmx, v4i64:$vmy)), + (v8i64 (INSERT_SUBREG v8i64:$vmx, v4i64:$vmy, sub_vm_even))>; + +def : Pat<(v8i64 (int_ve_vl_insert_vm512l v8i64:$vmx, v4i64:$vmy)), + 
          (v8i64 (INSERT_SUBREG v8i64:$vmx, v4i64:$vmy, sub_vm_odd))>;
+
+
+include "VEInstrIntrinsicVL.gen.td"
diff --git a/llvm/lib/Target/VE/VEInstrPatternsVec.td b/llvm/lib/Target/VE/VEInstrPatternsVec.td
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/VE/VEInstrPatternsVec.td
@@ -0,0 +1,616 @@
+//===----------------------------------------------------------------------===//
+// Vector Instruction Patterns
+//===----------------------------------------------------------------------===//
+
+// Pattern Matchings for Generic Vector Instructions
+
+// Pattern Fragments for sextload/zextload/truncstore of vector types
+
+def extloadv256i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v256i32;
+}]>;
+def sextloadv256i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v256i32;
+}]>;
+def zextloadv256i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v256i32;
+}]>;
+def extloadv128i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v128i32;
+}]>;
+def sextloadv128i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v128i32;
+}]>;
+def zextloadv128i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v128i32;
+}]>;
+def extloadv64i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v64i32;
+}]>;
+def sextloadv64i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v64i32;
+}]>;
+def zextloadv64i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v64i32;
+}]>;
+def extloadv32i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v32i32;
+}]>;
+def sextloadv32i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v32i32;
+}]>;
+def zextloadv32i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v32i32;
+}]>;
+def extloadv16i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i32;
+}]>;
+def sextloadv16i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i32;
+}]>;
+def zextloadv16i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v16i32;
+}]>;
+def extloadv8i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
+}]>;
+def sextloadv8i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
+}]>;
+def zextloadv8i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v8i32;
+}]>;
+def extloadv4i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v4i32;
+}]>;
+def sextloadv4i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v4i32;
+}]>;
+def zextloadv4i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v4i32;
+}]>;
+def extloadv2i32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v2i32;
+}]>;
+def sextloadv2i32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v2i32;
+}]>;
+def zextloadv2i32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::v2i32;
+}]>;
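These fragments only constrain the in-memory type; they do not select an instruction by themselves. As a rough sketch (not part of this patch), a rule built on top of one of them could pick the sign-extending 32-bit vector load defined later in this change, reusing the vldlsx_vIsl and LEA32zzi operations from the surrounding files:

  def : Pat<(v256i64 (sextloadv256i32 I64:$addr)),
            (vldlsx_vIsl 4, $addr, (LEA32zzi 256))>;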
+def truncstorev256i32 : PatFrag<(ops node:$val, node:$ptr),
+                                (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v256i32;
+}]>;
+def truncstorev128i32 : PatFrag<(ops node:$val, node:$ptr),
+                                (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v128i32;
+}]>;
+def truncstorev64i32 : PatFrag<(ops node:$val, node:$ptr),
+                               (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v64i32;
+}]>;
+def truncstorev32i32 : PatFrag<(ops node:$val, node:$ptr),
+                               (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v32i32;
+}]>;
+def truncstorev16i32 : PatFrag<(ops node:$val, node:$ptr),
+                               (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v16i32;
+}]>;
+def truncstorev8i32 : PatFrag<(ops node:$val, node:$ptr),
+                              (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v8i32;
+}]>;
+def truncstorev4i32 : PatFrag<(ops node:$val, node:$ptr),
+                              (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v4i32;
+}]>;
+def truncstorev2i32 : PatFrag<(ops node:$val, node:$ptr),
+                              (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::v2i32;
+}]>;
+
+// Load and store for all vector types
+//   v2i32, v2i64, v2f32, v2f64, v4i32, v4i64, v4f32, v4f64,
+//   v8i32, v8i64, v8f32, v8f64, v16i32, v16i64, v16f32, v16f64,
+//   v32i32, v32i64, v32f32, v32f64, v64i32, v64i64, v64f32, v64f64,
+//   v128i32, v128i64, v128f32, v128f64, v256i32, v256i64, v256f32, v256f64,
+//   v512i32, v512f32.
+
+def : Pat<(v512i32 (load I64:$addr)),
+          (v512i32 (vld_vIsl 8, $addr, (LEA32zzi 256)))>;
+
+def : Pat<(v512f32 (load I64:$addr)),
+          (v512f32 (vld_vIsl 8, $addr, (LEA32zzi 256)))>;
+
+multiclass load_for_vector_length<int length> {
+  def : Pat<(!cast<ValueType>("v" # !cast<string>(length) # "i32")
+               (load I64:$addr)),
+            (vldlsx_vIsl 4, $addr, (LEA32zzi length))>;
+  def : Pat<(!cast<ValueType>("v" # !cast<string>(length) # "f32")
+               (load I64:$addr)),
+            (vldu_vIsl 4, $addr, (LEA32zzi length))>;
+  def : Pat<(!cast<ValueType>("v" # !cast<string>(length) # "f64")
+               (load I64:$addr)),
+            (vld_vIsl 8, $addr, (LEA32zzi length))>;
+  def : Pat<(!cast<ValueType>("v" # !cast<string>(length) # "i64")
+               (load I64:$addr)),
+            (vld_vIsl 8, $addr, (LEA32zzi length))>;
+}
+
+defm : load_for_vector_length<256>;
+defm : load_for_vector_length<128>;
+defm : load_for_vector_length<64>;
+defm : load_for_vector_length<32>;
+defm : load_for_vector_length<16>;
+defm : load_for_vector_length<8>;
+defm : load_for_vector_length<4>;
+defm : load_for_vector_length<2>;
+
+multiclass store_for_vector_length<int length> {
+  def : Pat<(store !cast<ValueType>("v" # !cast<string>(length) # "i32"):$vx,
+                   I64:$addr),
+            (vstl_vIsl !cast<ValueType>("v" # !cast<string>(length) # "i32"):$vx,
+                       4, $addr, (LEA32zzi length))>;
+  def : Pat<(store !cast<ValueType>("v" # !cast<string>(length) # "i64"):$vx,
+                   I64:$addr),
+            (vst_vIsl !cast<ValueType>("v" # !cast<string>(length) # "i64"):$vx,
+                      8, $addr, (LEA32zzi length))>;
+}
+
+defm : store_for_vector_length<256>;
+defm : store_for_vector_length<128>;
+defm : store_for_vector_length<64>;
+defm : store_for_vector_length<32>;
+defm : store_for_vector_length<16>;
+defm : store_for_vector_length<8>;
+defm : store_for_vector_length<4>;
+defm : store_for_vector_length<2>;
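Each defm above simply stamps out the generic load/store patterns for one vector length: 32-bit elements go through the sign-extending or unsigned 32-bit loads with stride 4, 64-bit elements through the plain vld with stride 8, and the vector length is materialized with LEA32zzi. As an illustration only (the real expansion is produced by TableGen, this is not literal patch content), defm : load_for_vector_length<256>; is expected to yield patterns equivalent to:

  def : Pat<(v256i32 (load I64:$addr)),
            (vldlsx_vIsl 4, $addr, (LEA32zzi 256))>;
  def : Pat<(v256f32 (load I64:$addr)),
            (vldu_vIsl 4, $addr, (LEA32zzi 256))>;
  def : Pat<(v256f64 (load I64:$addr)),
            (vld_vIsl 8, $addr, (LEA32zzi 256))>;
  def : Pat<(v256i64 (load I64:$addr)),
            (vld_vIsl 8, $addr, (LEA32zzi 256))>;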
+
+// Load for
+//   v256i1, v512i1
+
+def : Pat<(v256i1 (load I64:$addr)),
+          (v256i1 (lvm_mmIs (lvm_mmIs (lvm_mmIs (lvm_mmIs (v256i1 (IMPLICIT_DEF)),
+                                                          0, (LDSri $addr, 0)),
+                                                1, (LDSri $addr, 8)),
+                                      2, (LDSri $addr, 16)),
+                            3, (LDSri $addr, 24)))>;
+
+def : Pat<(v512i1 (load I64:$addr)),
+          (v512i1 (lvm_MMIs (lvm_MMIs (lvm_MMIs (lvm_MMIs
+                    (lvm_MMIs (lvm_MMIs (lvm_MMIs (lvm_MMIs (v512i1 (IMPLICIT_DEF)),
+                                                            0, (LDSri $addr, 0)),
+                                                  1, (LDSri $addr, 8)),
+                                        2, (LDSri $addr, 16)),
+                              3, (LDSri $addr, 24)),
+                    4, (LDSri $addr, 32)),
+                    5, (LDSri $addr, 40)),
+                    6, (LDSri $addr, 48)),
+                    7, (LDSri $addr, 56)))>;
+
+// Stores for v256i1 and v512i1 are implemented in 2 ways.  The STVM/STVM512
+// pseudo instructions are used for frameindex related load/store instructions.
+// Custom Lowering is used for other load/store instructions.
+
+def : Pat<(store v256i1:$vx, ADDRri:$addr),
+          (STVMri ADDRri:$addr, $vx)>;
+
+def : Pat<(store v512i1:$vx, ADDRri:$addr),
+          (STVM512ri ADDRri:$addr, $vx)>;
+
+multiclass ext_for_vector_length<int length, ValueType vi32, ValueType vi64,
+                                 ValueType vi1> {
+  def : Pat<(vi64 (sext vi32:$vx)),
+            (vaddswsx_vIvl 0, $vx, (LEA32zzi length))>;
+  def : Pat<(vi64 (zext vi32:$vx)),
+            (vaddswzx_vIvl 0, $vx, (LEA32zzi length))>;
+}
+
+defm : ext_for_vector_length<256, v256i32, v256i64, v256i1>;
+defm : ext_for_vector_length<128, v128i32, v128i64, v128i1>;
+defm : ext_for_vector_length<64, v64i32, v64i64, v64i1>;
+defm : ext_for_vector_length<32, v32i32, v32i64, v32i1>;
+defm : ext_for_vector_length<16, v16i32, v16i64, v16i1>;
+defm : ext_for_vector_length<8, v8i32, v8i64, v8i1>;
+defm : ext_for_vector_length<4, v4i32, v4i64, v4i1>;
+defm : ext_for_vector_length<2, v2i32, v2i64, v2i1>;
+
+// Bitconvert for vector registers
+
+def: Pat<(v512i32 (scalar_to_vector i32:$val)),
+         (v512i32 (lsv_vvss (v512i32 (IMPLICIT_DEF)), 0,
+                            (SLLri
+                              (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32),
+                              32)))>;
+def: Pat<(v512f32 (scalar_to_vector f32:$val)),
+         (v512f32 (lsv_vvss (v512f32 (IMPLICIT_DEF)), 0,
+                            (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32)))>;
+
+multiclass s2v_for_vector_length<int length, ValueType vi32, ValueType vi64,
+                                 ValueType vf32, ValueType vf64> {
+  def: Pat<(vi32 (scalar_to_vector i32:$val)),
+           (lsv_vvss (vi32 (IMPLICIT_DEF)), 0,
+                     (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32))>;
+  def: Pat<(vi64 (scalar_to_vector i64:$val)),
+           (lsv_vvss (vi64 (IMPLICIT_DEF)), 0, $val)>;
+  def: Pat<(vf32 (scalar_to_vector f32:$val)),
+           (lsv_vvss (vf32 (IMPLICIT_DEF)), 0,
+                     (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32))>;
+  def: Pat<(vf64 (scalar_to_vector f64:$val)),
+           (lsv_vvss (vf64 (IMPLICIT_DEF)), 0,
+                     (COPY_TO_REGCLASS $val, I64))>;
+}
+
+defm : s2v_for_vector_length<256, v256i32, v256i64, v256f32, v256f64>;
+defm : s2v_for_vector_length<128, v128i32, v128i64, v128f32, v128f64>;
+defm : s2v_for_vector_length<64, v64i32, v64i64, v64f32, v64f64>;
+defm : s2v_for_vector_length<32, v32i32, v32i64, v32f32, v32f64>;
+defm : s2v_for_vector_length<16, v16i32, v16i64, v16f32, v16f64>;
+defm : s2v_for_vector_length<8, v8i32, v8i64, v8f32, v8f64>;
+defm : s2v_for_vector_length<4, v4i32, v4i64, v4f32, v4f64>;
+defm : s2v_for_vector_length<2, v2i32, v2i64, v2f32, v2f64>;
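Each scalar_to_vector instantiation above writes the scalar into element 0 of an otherwise undefined vector using LSV; 32-bit values are first widened into the 64-bit scalar register that lsv_vvss expects. As a rough sketch of what the f64 rule of the 256-element instantiation expands to (illustrative only, TableGen produces the real definitions):

  def: Pat<(v256f64 (scalar_to_vector f64:$val)),
           (lsv_vvss (v256f64 (IMPLICIT_DEF)), 0,
                     (COPY_TO_REGCLASS $val, I64))>;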
+
+// Series of INSERT_VECTOR_ELT for all VE vector types;
+// v512i32 and v512f32 are expanded by LowerINSERT_VECTOR_ELT().
+
+multiclass ive_for_vector_length<int length, ValueType vi32, ValueType vi64,
+                                 ValueType vf32, ValueType vf64> {
+  def: Pat<(vi32 (insertelt vi32:$vec, i32:$val, uimm7:$idx)),
+           (lsv_vvss vi32:$vec, imm:$idx,
+                     (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32))>;
+  def: Pat<(vi32 (insertelt vi32:$vec, i32:$val, i64:$idx)),
+           (lsv_vvss vi32:$vec,
+                     (EXTRACT_SUBREG $idx, sub_i32),
+                     (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32))>;
+}
+
+defm : ive_for_vector_length<256, v256i32, v256i64, v256f32, v256f64>;
+defm : ive_for_vector_length<128, v128i32, v128i64, v128f32, v128f64>;
+defm : ive_for_vector_length<64, v64i32, v64i64, v64f32, v64f64>;
+defm : ive_for_vector_length<32, v32i32, v32i64, v32f32, v32f64>;
+defm : ive_for_vector_length<16, v16i32, v16i64, v16f32, v16f64>;
+defm : ive_for_vector_length<8, v8i32, v8i64, v8f32, v8f64>;
+defm : ive_for_vector_length<4, v4i32, v4i64, v4f32, v4f64>;
+defm : ive_for_vector_length<2, v2i32, v2i64, v2f32, v2f64>;
+
+// Series of EXTRACT_VECTOR_ELT for all VE vector types;
+// v512i32 and v512f32 are expanded by LowerEXTRACT_VECTOR_ELT().
+
+multiclass eve_for_vector_length<int length, ValueType vi32, ValueType vi64,
+                                 ValueType vf32, ValueType vf64> {
+  def: Pat<(i32 (extractelt vi32:$vec, uimm7:$idx)),
+           (EXTRACT_SUBREG (lvsl_svs vi32:$vec, imm:$idx), sub_i32)>;
+}
+
+defm : eve_for_vector_length<256, v256i32, v256i64, v256f32, v256f64>;
+defm : eve_for_vector_length<128, v128i32, v128i64, v128f32, v128f64>;
+defm : eve_for_vector_length<64, v64i32, v64i64, v64f32, v64f64>;
+defm : eve_for_vector_length<32, v32i32, v32i64, v32f32, v32f64>;
+defm : eve_for_vector_length<16, v16i32, v16i64, v16f32, v16f64>;
+defm : eve_for_vector_length<8, v8i32, v8i64, v8f32, v8f64>;
+defm : eve_for_vector_length<4, v4i32, v4i64, v4f32, v4f64>;
+defm : eve_for_vector_length<2, v2i32, v2i64, v2f32, v2f64>;
+
+// Custom ISDs
+//   VEISD::VEC_SEQ - represents a vector sequence where the operand is the stride
+//   VEISD::VEC_BROADCAST - represents a vector splat of a scalar value into all
+//                          vector lanes.
+ +def vec_seq : SDNode<"VEISD::VEC_SEQ", SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisInt<1>]>>; +def vec_broadcast : SDNode<"VEISD::VEC_BROADCAST", SDTypeProfile<1, 1, [SDTCisVec<0>]>>; + +// Broadcast + +def: Pat<(v512i32 (vec_broadcast i32:$val)), + (pvbrd_vsl + (ORrr + (SLLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32), 32), + (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32), 32)), + (LEA32zzi 256))>; +def: Pat<(v512f32 (vec_broadcast f32:$val)), + (pvbrd_vsl + (ORrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), + (SRLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), 32)), + (LEA32zzi 256))>; + +multiclass vbrd_for_vector_length { + def : Pat<(vi32 (vec_broadcast i32:$sy)), + (vbrdl_vsl i32:$sy, (LEA32zzi length))>; + def : Pat<(vf32 (vec_broadcast f32:$sy)), + (vbrdu_vsl f32:$sy, (LEA32zzi length))>; + def : Pat<(vi64 (vec_broadcast i64:$sy)), + (vbrd_vsl i64:$sy, (LEA32zzi length))>; + def : Pat<(vf64 (vec_broadcast f64:$sy)), + (vbrd_vsl f64:$sy, (LEA32zzi length))>; +} + +defm : vbrd_for_vector_length<256, v256i32, v256i64, v256f32, v256f64>; +defm : vbrd_for_vector_length<128, v128i32, v128i64, v128f32, v128f64>; +defm : vbrd_for_vector_length<64, v64i32, v64i64, v64f32, v64f64>; +defm : vbrd_for_vector_length<32, v32i32, v32i64, v32f32, v32f64>; +defm : vbrd_for_vector_length<16, v16i32, v16i64, v16f32, v16f64>; +defm : vbrd_for_vector_length<8, v8i32, v8i64, v8f32, v8f64>; +defm : vbrd_for_vector_length<4, v4i32, v4i64, v4f32, v4f64>; +defm : vbrd_for_vector_length<2, v2i32, v2i64, v2f32, v2f64>; + +// Sequence + +multiclass vseq_for_vector_length { + def : Pat<(vi32 (vec_seq (i32 1))), + (pvseqlo_vl (LEA32zzi length))>; +} + +defm : vseq_for_vector_length<256, v256i32, v256i64>; +defm : vseq_for_vector_length<128, v128i32, v128i64>; +defm : vseq_for_vector_length<64, v64i32, v64i64>; +defm : vseq_for_vector_length<32, v32i32, v32i64>; +defm : vseq_for_vector_length<16, v16i32, v16i64>; +defm : vseq_for_vector_length<8, v8i32, v8i64>; +defm : vseq_for_vector_length<4, v4i32, v4i64>; +defm : vseq_for_vector_length<2, v2i32, v2i64>; + + +// Double-Precision Arithmetic +// +// fadd, fsub, fmul, and fdiv for all floating point vector types. 
+ +def : Pat<(fadd (vec_broadcast f32:$val), v512f32:$vz), + (pvfadd_vsvl + (ORrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), + (SRLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), 32)), + v512f32:$vz, (LEA32zzi 256))>; +def : Pat<(fsub (vec_broadcast f32:$val), v512f32:$vz), + (pvfsub_vsvl + (ORrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), + (SRLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), 32)), + v512f32:$vz, (LEA32zzi 256))>; +def : Pat<(fmul (vec_broadcast f32:$val), v512f32:$vz), + (pvfmul_vsvl + (ORrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), + (SRLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_f32), 32)), + v512f32:$vz, (LEA32zzi 256))>; + +multiclass farith_for_vector_length { + def : Pat<(fadd vf32:$vy, vf32:$vz), + (vfadds_vvvl vf32:$vy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fadd vf64:$vy, vf64:$vz), + (vfaddd_vvvl vf64:$vy, vf64:$vz, (LEA32zzi length))>; + def : Pat<(fadd (vf32 (vec_broadcast f32:$sy)), vf32:$vz), + (vfadds_vsvl f32:$sy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fadd (vf64 (vec_broadcast f64:$sy)), vf64:$vz), + (vfaddd_vsvl f64:$sy, vf64:$vz, (LEA32zzi length))>; + def : Pat<(fsub vf32:$vy, vf32:$vz), + (vfsubs_vvvl vf32:$vy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fsub vf64:$vy, vf64:$vz), + (vfsubd_vvvl vf64:$vy, vf64:$vz, (LEA32zzi length))>; + def : Pat<(fsub (vf32 (vec_broadcast f32:$sy)), vf32:$vz), + (vfsubs_vsvl f32:$sy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fsub (vf64 (vec_broadcast f64:$sy)), vf64:$vz), + (vfsubd_vsvl f64:$sy, vf64:$vz, (LEA32zzi length))>; + def : Pat<(fmul vf32:$vy, vf32:$vz), + (vfmuls_vvvl vf32:$vy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fmul vf64:$vy, vf64:$vz), + (vfmuld_vvvl vf64:$vy, vf64:$vz, (LEA32zzi length))>; + def : Pat<(fmul (vf32 (vec_broadcast f32:$sy)), vf32:$vz), + (vfmuls_vsvl f32:$sy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fmul (vf64 (vec_broadcast f64:$sy)), vf64:$vz), + (vfmuld_vsvl f64:$sy, vf64:$vz, (LEA32zzi length))>; + def : Pat<(fdiv vf32:$vy, vf32:$vz), + (vfdivs_vvvl vf32:$vy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fdiv vf64:$vy, vf64:$vz), + (vfdivd_vvvl vf64:$vy, vf64:$vz, (LEA32zzi length))>; + def : Pat<(fdiv (vf32 (vec_broadcast f32:$sy)), vf32:$vz), + (vfdivs_vsvl f32:$sy, vf32:$vz, (LEA32zzi length))>; + def : Pat<(fdiv (vf64 (vec_broadcast f64:$sy)), vf64:$vz), + (vfdivd_vsvl f64:$sy, vf64:$vz, (LEA32zzi length))>; +} + +defm : farith_for_vector_length<256, v256f32, v256f64>; +defm : farith_for_vector_length<128, v128f32, v128f64>; +defm : farith_for_vector_length<64, v64f32, v64f64>; +defm : farith_for_vector_length<32, v32f32, v32f64>; +defm : farith_for_vector_length<16, v16f32, v16f64>; +defm : farith_for_vector_length<8, v8f32, v8f64>; +defm : farith_for_vector_length<4, v4f32, v4f64>; +defm : farith_for_vector_length<2, v2f32, v2f64>; + +// fneg for all floating point vector types. + + +// fma for all floating point vector types. 
+ +def : Pat<(fma (v512f32 (vec_broadcast f32:$sy)), v512f32:$vw, v512f32:$vy), + (pvfmad_vvsvl v512f32:$vy, + (ORrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_f32), + (SRLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_f32), 32)), + v512f32:$vw, + (LEA32zzi 256))>; +def : Pat<(fma v512f32:$vw, (v512f32 (vec_broadcast f32:$sy)), v512f32:$vy), + (pvfmad_vvsvl v512f32:$vy, + (ORrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_f32), + (SRLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_f32), 32)), + v512f32:$vw, + (LEA32zzi 256))>; +def : Pat<(fma v512f32:$vz, v512f32:$vw, (v512f32 (vec_broadcast f32:$sy))), + (pvfmad_vsvvl (ORrr + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_f32), + (SRLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_f32), 32)), + v512f32:$vz, v512f32:$vw, + (LEA32zzi 256))>; + +multiclass fma_for_vector_length { + def : Pat<(fma (vf64 (vec_broadcast f64:$sy)), vf64:$vw, vf64:$vy), + (vfmadd_vvsvl vf64:$vy, f64:$sy, vf64:$vw, (LEA32zzi length))>; + def : Pat<(fma vf64:$vw, (vf64 (vec_broadcast f64:$sy)), vf64:$vy), + (vfmadd_vvsvl vf64:$vy, f64:$sy, vf64:$vw, (LEA32zzi length))>; + def : Pat<(fma vf64:$vz, vf64:$vw, (vf64 (vec_broadcast f64:$sy))), + (vfmadd_vsvvl f64:$sy, vf64:$vz, vf64:$vw, (LEA32zzi length))>; + def : Pat<(fma vf64:$vz, vf64:$vw, vf64:$vy), + (vfmadd_vvvvl vf64:$vy, vf64:$vz, vf64:$vw, (LEA32zzi length))>; + def : Pat<(fma (vf32 (vec_broadcast f32:$sy)), vf32:$vw, vf32:$vy), + (vfmads_vvsvl vf32:$vy, f32:$sy, vf32:$vw, (LEA32zzi length))>; + def : Pat<(fma vf32:$vw, (vf32 (vec_broadcast f32:$sy)), vf32:$vy), + (vfmads_vvsvl vf32:$vy, f32:$sy, vf32:$vw, (LEA32zzi length))>; + def : Pat<(fma vf32:$vz, vf32:$vw, (vf32 (vec_broadcast f32:$sy))), + (vfmads_vsvvl f32:$sy, vf32:$vz, vf32:$vw, (LEA32zzi length))>; + def : Pat<(fma vf32:$vz, vf32:$vw, vf32:$vy), + (vfmads_vvvvl vf32:$vy, vf32:$vz, vf32:$vw, (LEA32zzi length))>; +} + +defm : fma_for_vector_length<256, v256f32, v256f64>; +defm : fma_for_vector_length<128, v128f32, v128f64>; +defm : fma_for_vector_length<64, v64f32, v64f64>; +defm : fma_for_vector_length<32, v32f32, v32f64>; +defm : fma_for_vector_length<16, v16f32, v16f64>; +defm : fma_for_vector_length<8, v8f32, v8f64>; +defm : fma_for_vector_length<4, v4f32, v4f64>; +defm : fma_for_vector_length<2, v2f32, v2f64>; + +// Integer Arithmetic +// +// add and sub for v512i32 +// add, sub, mul, sdiv, and udiv for other integer vector types. 
+ +def : Pat<(add (vec_broadcast i32:$val), v512i32:$vz), + (pvadds_vsvl + (ORrr + (SLLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32), 32), + (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32), 32)), + v512i32:$vz, (LEA32zzi 256))>; +def : Pat<(sub (vec_broadcast i32:$val), v512i32:$vz), + (pvsubs_vsvl + (ORrr + (SLLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32), 32), + (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $val, sub_i32), 32)), + v512i32:$vz, (LEA32zzi 256))>; + +multiclass arith_for_vector_length { + def : Pat<(add vi32:$vy, vi32:$vz), + (vaddswsx_vvvl vi32:$vy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(add vi64:$vy, vi64:$vz), + (vaddsl_vvvl vi64:$vy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(add (vi32 (vec_broadcast i32:$sy)), vi32:$vz), + (vaddswsx_vsvl i32:$sy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(add (vi64 (vec_broadcast i64:$sy)), vi64:$vz), + (vaddsl_vsvl i64:$sy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(sub vi32:$vy, vi32:$vz), + (vsubswsx_vvvl vi32:$vy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(sub vi64:$vy, vi64:$vz), + (vsubsl_vvvl vi64:$vy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(sub (vi32 (vec_broadcast i32:$sy)), vi32:$vz), + (vsubswsx_vsvl i32:$sy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(sub (vi64 (vec_broadcast i64:$sy)), vi64:$vz), + (vsubsl_vsvl i64:$sy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(mul vi32:$vy, vi32:$vz), + (vmulswsx_vvvl vi32:$vy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(mul vi64:$vy, vi64:$vz), + (vmulsl_vvvl vi64:$vy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(mul (vi32 (vec_broadcast i32:$sy)), vi32:$vz), + (vmulswsx_vsvl i32:$sy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(mul (vi64 (vec_broadcast i64:$sy)), vi64:$vz), + (vmulsl_vsvl i64:$sy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(sdiv vi32:$vy, vi32:$vz), + (vdivswsx_vvvl vi32:$vy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(sdiv vi64:$vy, vi64:$vz), + (vdivsl_vvvl vi64:$vy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(sdiv (vi32 (vec_broadcast i32:$sy)), vi32:$vz), + (vdivswsx_vsvl i32:$sy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(sdiv (vi64 (vec_broadcast i64:$sy)), vi64:$vz), + (vdivsl_vsvl i64:$sy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(udiv vi32:$vy, vi32:$vz), + (vdivuw_vvvl vi32:$vy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(udiv vi64:$vy, vi64:$vz), + (vdivul_vvvl vi64:$vy, vi64:$vz, (LEA32zzi length))>; + def : Pat<(udiv (vi32 (vec_broadcast i32:$sy)), vi32:$vz), + (vdivuw_vsvl i32:$sy, vi32:$vz, (LEA32zzi length))>; + def : Pat<(udiv (vi64 (vec_broadcast i64:$sy)), vi64:$vz), + (vdivul_vsvl i64:$sy, vi64:$vz, (LEA32zzi length))>; +} + +defm : arith_for_vector_length<256, v256i32, v256i64>; +defm : arith_for_vector_length<128, v128i32, v128i64>; +defm : arith_for_vector_length<64, v64i32, v64i64>; +defm : arith_for_vector_length<32, v32i32, v32i64>; +defm : arith_for_vector_length<16, v16i32, v16i64>; +defm : arith_for_vector_length<8, v8i32, v8i64>; +defm : arith_for_vector_length<4, v4i32, v4i64>; +defm : arith_for_vector_length<2, v2i32, v2i64>; + +// Logic + +multiclass logic_for_vector_length { + def : Pat<(and vi32:$vx, vi32:$vy), + (pvandlo_vvvl vi32:$vx, vi32:$vy, (LEA32zzi length))>; +} + +defm : logic_for_vector_length<256, v256i32, v256i64>; +defm : logic_for_vector_length<128, v128i32, v128i64>; +defm : logic_for_vector_length<64, v64i32, v64i64>; +defm : logic_for_vector_length<32, v32i32, v32i64>; +defm : logic_for_vector_length<16, v16i32, v16i64>; +defm : logic_for_vector_length<8, v8i32, v8i64>; 
+defm : logic_for_vector_length<4, v4i32, v4i64>;
+defm : logic_for_vector_length<2, v2i32, v2i64>;
+
+// Shifts
+//
+// shl, srl, and sra for v512i32
+// shl, srl, and sra for other integer vector types.
+
+def : Pat<(shl v512i32:$vx, (v512i32 (vec_broadcast i32:$sy))),
+          (pvsll_vvsl v512i32:$vx,
+                      (ORrr
+                        (SLLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32), 32),
+                        (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32), 32)),
+                      (LEA32zzi 256))>;
+def : Pat<(srl v512i32:$vx, (v512i32 (vec_broadcast i32:$sy))),
+          (pvsrl_vvsl v512i32:$vx,
+                      (ORrr
+                        (SLLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32), 32),
+                        (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32), 32)),
+                      (LEA32zzi 256))>;
+def : Pat<(sra v512i32:$vx, (v512i32 (vec_broadcast i32:$sy))),
+          (pvsra_vvsl v512i32:$vx,
+                      (ORrr
+                        (SLLri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32), 32),
+                        (ANDrm0 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32), 32)),
+                      (LEA32zzi 256))>;
+
+multiclass shift_for_vector_length<int length, ValueType vi32, ValueType vi64> {
+  def : Pat<(shl vi64:$vx, (vi64 (vec_broadcast i64:$sy))),
+            (vslal_vvsl vi64:$vx, i64:$sy, (LEA32zzi length))>;
+  def : Pat<(srl vi64:$vx, (vi64 (vec_broadcast i64:$sy))),
+            (vsrl_vvsl vi64:$vx, i64:$sy, (LEA32zzi length))>;
+  def : Pat<(sra vi64:$vx, (vi64 (vec_broadcast i64:$sy))),
+            (vsral_vvsl vi64:$vx, i64:$sy, (LEA32zzi length))>;
+  def : Pat<(shl vi32:$vx, (vi32 (vec_broadcast i32:$sy))),
+            (pvslalo_vvsl vi32:$vx,
+                          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32),
+                          (LEA32zzi length))>;
+  def : Pat<(srl vi32:$vx, (vi32 (vec_broadcast i32:$sy))),
+            (pvsrllo_vvsl vi32:$vx,
+                          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32),
+                          (LEA32zzi length))>;
+  def : Pat<(sra vi32:$vx, (vi32 (vec_broadcast i32:$sy))),
+            (pvsralo_vvsl vi32:$vx,
+                          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32),
+                          (LEA32zzi length))>;
+}
+
+defm : shift_for_vector_length<256, v256i32, v256i64>;
+defm : shift_for_vector_length<128, v128i32, v128i64>;
+defm : shift_for_vector_length<64, v64i32, v64i64>;
+defm : shift_for_vector_length<32, v32i32, v32i64>;
+defm : shift_for_vector_length<16, v16i32, v16i64>;
+defm : shift_for_vector_length<8, v8i32, v8i64>;
+defm : shift_for_vector_length<4, v4i32, v4i64>;
+defm : shift_for_vector_length<2, v2i32, v2i64>;
diff --git a/llvm/lib/Target/VE/VEInstrVec.td b/llvm/lib/Target/VE/VEInstrVec.td
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Target/VE/VEInstrVec.td
@@ -0,0 +1,73 @@
+//===----------------------------------------------------------------------===//
+// Vector Instructions
+//===----------------------------------------------------------------------===//
+
+class RV<bits<8> opVal, dag outs, dag ins, string asmstr, list<dag> pattern,
+         InstrItinClass itin = NoItinerary>
+    : InstVE<outs, ins, asmstr, pattern, itin> {
+  bits<1> cx = 0;
+  bits<1> cx2 = 0;
+  bits<1> cs = 0;   // y operand is scalar(1) or vector(0)
+  bits<1> cs2 = 0;
+  bits<1> cy = 0;   // y operand is register(1) or immediate(0)
+  bits<7> sy = 0;
+  bits<1> cz = 0;   // z operand is register(1) or immediate(0)
+  bits<7> sz = 0;
+  bits<8> vx = 0;
+  bits<8> vy = 0;
+  bits<8> vz = 0;
+  bits<8> vw = 0;
+  bits<4> m = 0;
+  let op = opVal;
+  let Inst{8} = cx;
+  let Inst{9} = cx2;
+  let Inst{10} = cs;
+  let Inst{11} = cs2;
+  let Inst{15-12} = m;
+  let Inst{16} = cy;
+  let Inst{23-17} = sy;
+  let Inst{24} = cz;
+  let Inst{31-25} = sz;
+  let Inst{39-32} = vx;
+  let Inst{47-40} = vy;
+  let Inst{55-48} = vz;
+  let Inst{63-56} = vw;
+}
+
+// Pseudo instructions for VR/VM/VM512 spill/restore
+//
+// These pseudo instructions are used only for spill/restore since
+//
InlineSpiller asusmes storeRegToStackSlot/loadRegFromStackSlot functions +// emit only single instruction. Those functions emit a single store/load +// instruction or one of these pseudo store/load instructions. +// +// Specifies hasSideEffects = 0 to disable UnmodeledSideEffects. + +let mayLoad = 1, hasSideEffects = 0 in { +def LDVRri : Pseudo< + (outs V64:$vx), (ins MEMri:$addr, I32:$vl), + "# pseudo ldvr $vx, $addr, $vl", []>; +def LDVMri : Pseudo< + (outs VM:$vmx), (ins MEMri:$addr), + "# pseudo ldvm $vmx, $addr", []>; +def LDVM512ri : Pseudo< + (outs VM512:$vmx), (ins MEMri:$addr), + "# pseudo ldvm512 $vmx, $addr", []>; +} +let mayStore = 1, hasSideEffects = 0 in { +def STVRri : Pseudo< + (outs), (ins MEMri:$addr, V64:$vx, I32:$vl), + "# pseudo stvr $addr, $vx, $vl", []>; +def STVMri : Pseudo< + (outs), (ins MEMri:$addr, VM:$vmx), + "# pseudo stvm $addr, $vmx", []>; +def STVM512ri : Pseudo< + (outs), (ins MEMri:$addr, VM512:$vmx), + "# pseudo stvm512 $addr, $vmx", []>; +} + + +// 5.3.2.16. Vector Control Instructions + +let cx = 0, sx = 0, cy = 0, cz = 0, sz = 0, hasSideEffects = 0, isCodeGenOnly = 1 in +def LVL : RR<0xBF, (outs), (ins I32:$sy), "lvl $sy", []>; diff --git a/llvm/lib/Target/VE/VEInstrVecVL.gen.td b/llvm/lib/Target/VE/VEInstrVecVL.gen.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEInstrVecVL.gen.td @@ -0,0 +1,15943 @@ +// inst=VLD asm=vld intrisic=vld_vssl +def vld_vssl : RV<0x81, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vld $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD asm=vld intrisic=vld_vssvl +def vld_vssvl : RV<0x81, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vld $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD asm=vld intrisic=vld_vssl +def vld_vIsl : RV<0x81, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vld $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD asm=vld intrisic=vld_vssvl +def vld_vIsvl : RV<0x81, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vld $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD asm=vld.nc intrisic=vldnc_vssl +def vldnc_vssl : RV<0x81, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vld.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD asm=vld.nc intrisic=vldnc_vssvl +def vldnc_vssvl : RV<0x81, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vld.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD asm=vld.nc intrisic=vldnc_vssl +def vldnc_vIsl : RV<0x81, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vld.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD asm=vld.nc intrisic=vldnc_vssvl +def vldnc_vIsvl : RV<0x81, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vld.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + 
let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu intrisic=vldu_vssl +def vldu_vssl : RV<0x82, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldu $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu intrisic=vldu_vssvl +def vldu_vssvl : RV<0x82, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldu $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu intrisic=vldu_vssl +def vldu_vIsl : RV<0x82, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldu $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu intrisic=vldu_vssvl +def vldu_vIsvl : RV<0x82, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldu $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu.nc intrisic=vldunc_vssl +def vldunc_vssl : RV<0x82, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldu.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu.nc intrisic=vldunc_vssvl +def vldunc_vssvl : RV<0x82, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldu.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu.nc intrisic=vldunc_vssl +def vldunc_vIsl : RV<0x82, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldu.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU asm=vldu.nc intrisic=vldunc_vssvl +def vldunc_vIsvl : RV<0x82, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldu.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx intrisic=vldlsx_vssl +def vldlsx_vssl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldl.sx $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx intrisic=vldlsx_vssvl +def vldlsx_vssvl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl.sx $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx intrisic=vldlsx_vssl +def vldlsx_vIsl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl.sx $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx intrisic=vldlsx_vssvl +def vldlsx_vIsvl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl.sx $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx.nc intrisic=vldlsxnc_vssl +def vldlsxnc_vssl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, 
I32:$vl), + "vldl.sx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx.nc intrisic=vldlsxnc_vssvl +def vldlsxnc_vssvl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl.sx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx.nc intrisic=vldlsxnc_vssl +def vldlsxnc_vIsl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl.sx.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.sx.nc intrisic=vldlsxnc_vssvl +def vldlsxnc_vIsvl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl.sx.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx intrisic=vldlzx_vssl +def vldlzx_vssl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldl.zx $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx intrisic=vldlzx_vssvl +def vldlzx_vssvl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl.zx $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx intrisic=vldlzx_vssl +def vldlzx_vIsl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl.zx $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx intrisic=vldlzx_vssvl +def vldlzx_vIsvl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl.zx $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx.nc intrisic=vldlzxnc_vssl +def vldlzxnc_vssl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldl.zx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx.nc intrisic=vldlzxnc_vssvl +def vldlzxnc_vssvl : RV<0x83, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl.zx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx.nc intrisic=vldlzxnc_vssl +def vldlzxnc_vIsl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl.zx.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL asm=vldl.zx.nc intrisic=vldlzxnc_vssvl +def vldlzxnc_vIsvl : RV<0x83, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl.zx.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d intrisic=vld2d_vssl +def vld2d_vssl : RV<0xc1, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vld2d $vx,$sy,$sz", [], NoItinerary> +{ 
+ let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d intrisic=vld2d_vssvl +def vld2d_vssvl : RV<0xc1, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vld2d $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d intrisic=vld2d_vssl +def vld2d_vIsl : RV<0xc1, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vld2d $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d intrisic=vld2d_vssvl +def vld2d_vIsvl : RV<0xc1, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vld2d $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d.nc intrisic=vld2dnc_vssl +def vld2dnc_vssl : RV<0xc1, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vld2d.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d.nc intrisic=vld2dnc_vssvl +def vld2dnc_vssvl : RV<0xc1, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vld2d.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d.nc intrisic=vld2dnc_vssl +def vld2dnc_vIsl : RV<0xc1, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vld2d.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLD2D asm=vld2d.nc intrisic=vld2dnc_vssvl +def vld2dnc_vIsvl : RV<0xc1, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vld2d.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU2D asm=vldu2d intrisic=vldu2d_vssl +def vldu2d_vssl : RV<0xc2, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldu2d $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU2D asm=vldu2d intrisic=vldu2d_vssvl +def vldu2d_vssvl : RV<0xc2, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldu2d $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU2D asm=vldu2d intrisic=vldu2d_vssl +def vldu2d_vIsl : RV<0xc2, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldu2d $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU2D asm=vldu2d intrisic=vldu2d_vssvl +def vldu2d_vIsvl : RV<0xc2, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldu2d $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU2D asm=vldu2d.nc intrisic=vldu2dnc_vssl +def vldu2dnc_vssl : RV<0xc2, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldu2d.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// 
inst=VLDU2D asm=vldu2d.nc intrisic=vldu2dnc_vssvl +def vldu2dnc_vssvl : RV<0xc2, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldu2d.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU2D asm=vldu2d.nc intrisic=vldu2dnc_vssl +def vldu2dnc_vIsl : RV<0xc2, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldu2d.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDU2D asm=vldu2d.nc intrisic=vldu2dnc_vssvl +def vldu2dnc_vIsvl : RV<0xc2, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldu2d.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx intrisic=vldl2dsx_vssl +def vldl2dsx_vssl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldl2d.sx $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx intrisic=vldl2dsx_vssvl +def vldl2dsx_vssvl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.sx $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx intrisic=vldl2dsx_vssl +def vldl2dsx_vIsl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl2d.sx $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx intrisic=vldl2dsx_vssvl +def vldl2dsx_vIsvl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.sx $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx.nc intrisic=vldl2dsxnc_vssl +def vldl2dsxnc_vssl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldl2d.sx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx.nc intrisic=vldl2dsxnc_vssvl +def vldl2dsxnc_vssvl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.sx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx.nc intrisic=vldl2dsxnc_vssl +def vldl2dsxnc_vIsl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl2d.sx.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.sx.nc intrisic=vldl2dsxnc_vssvl +def vldl2dsxnc_vIsvl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.sx.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx intrisic=vldl2dzx_vssl +def vldl2dzx_vssl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldl2d.zx $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + 
let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx intrisic=vldl2dzx_vssvl +def vldl2dzx_vssvl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.zx $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx intrisic=vldl2dzx_vssl +def vldl2dzx_vIsl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl2d.zx $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx intrisic=vldl2dzx_vssvl +def vldl2dzx_vIsvl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.zx $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx.nc intrisic=vldl2dzxnc_vssl +def vldl2dzxnc_vssl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, I32:$vl), + "vldl2d.zx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx.nc intrisic=vldl2dzxnc_vssvl +def vldl2dzxnc_vssvl : RV<0xc3, (outs V64:$vx), (ins I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.zx.nc $vx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx.nc intrisic=vldl2dzxnc_vssl +def vldl2dzxnc_vIsl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "vldl2d.zx.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VLDL2D asm=vldl2d.zx.nc intrisic=vldl2dzxnc_vssvl +def vldl2dzxnc_vIsvl : RV<0xc3, (outs V64:$vx), (ins simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vldl2d.zx.nc $vx,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst intrisic=vst_vssl +def vst_vssl : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst intrisic=vst_vssl +def vst_vIsl : RV<0x91, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vst $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst intrisic=vst_vssml +def vst_vssml : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst intrisic=vst_vssml +def vst_vIsml : RV<0x91, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc intrisic=vstnc_vssl +def vstnc_vssl : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc intrisic=vstnc_vssl +def vstnc_vIsl : RV<0x91, (outs ), (ins V64:$vx, 
simm7Op64:$I, I64:$sz, I32:$vl), + "vst.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc intrisic=vstnc_vssml +def vstnc_vssml : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst.nc $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc intrisic=vstnc_vssml +def vstnc_vIsml : RV<0x91, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst.nc $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.ot intrisic=vstot_vssl +def vstot_vssl : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.ot intrisic=vstot_vssl +def vstot_vIsl : RV<0x91, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vst.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.ot intrisic=vstot_vssml +def vstot_vssml : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.ot intrisic=vstot_vssml +def vstot_vIsml : RV<0x91, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc.ot intrisic=vstncot_vssl +def vstncot_vssl : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst.nc.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc.ot intrisic=vstncot_vssl +def vstncot_vIsl : RV<0x91, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vst.nc.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc.ot intrisic=vstncot_vssml +def vstncot_vssml : RV<0x91, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst.nc.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST asm=vst.nc.ot intrisic=vstncot_vssml +def vstncot_vIsml : RV<0x91, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst.nc.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu intrisic=vstu_vssl +def vstu_vssl : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu intrisic=vstu_vssl +def vstu_vIsl : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu intrisic=vstu_vssml +def vstu_vssml : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu 
$vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu intrisic=vstu_vssml +def vstu_vIsml : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstu $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc intrisic=vstunc_vssl +def vstunc_vssl : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc intrisic=vstunc_vssl +def vstunc_vIsl : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc intrisic=vstunc_vssml +def vstunc_vssml : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu.nc $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc intrisic=vstunc_vssml +def vstunc_vIsml : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstu.nc $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.ot intrisic=vstuot_vssl +def vstuot_vssl : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.ot intrisic=vstuot_vssl +def vstuot_vIsl : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.ot intrisic=vstuot_vssml +def vstuot_vssml : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.ot intrisic=vstuot_vssml +def vstuot_vIsml : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstu.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc.ot intrisic=vstuncot_vssl +def vstuncot_vssl : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu.nc.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc.ot intrisic=vstuncot_vssl +def vstuncot_vIsl : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu.nc.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc.ot intrisic=vstuncot_vssml +def vstuncot_vssml : RV<0x92, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu.nc.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU asm=vstu.nc.ot intrisic=vstuncot_vssml +def vstuncot_vIsml : RV<0x92, (outs ), (ins V64:$vx, simm7Op64:$I, 
I64:$sz, VM:$vm, I32:$vl), + "vstu.nc.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl intrisic=vstl_vssl +def vstl_vssl : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl intrisic=vstl_vssl +def vstl_vIsl : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl intrisic=vstl_vssml +def vstl_vssml : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl intrisic=vstl_vssml +def vstl_vIsml : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc intrisic=vstlnc_vssl +def vstlnc_vssl : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc intrisic=vstlnc_vssl +def vstlnc_vIsl : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc intrisic=vstlnc_vssml +def vstlnc_vssml : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl.nc $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc intrisic=vstlnc_vssml +def vstlnc_vIsml : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl.nc $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.ot intrisic=vstlot_vssl +def vstlot_vssl : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.ot intrisic=vstlot_vssl +def vstlot_vIsl : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.ot intrisic=vstlot_vssml +def vstlot_vssml : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.ot intrisic=vstlot_vssml +def vstlot_vIsml : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc.ot intrisic=vstlncot_vssl +def vstlncot_vssl : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl.nc.ot 
$vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc.ot intrisic=vstlncot_vssl +def vstlncot_vIsl : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl.nc.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc.ot intrisic=vstlncot_vssml +def vstlncot_vssml : RV<0x93, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl.nc.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL asm=vstl.nc.ot intrisic=vstlncot_vssml +def vstlncot_vIsml : RV<0x93, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl.nc.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d intrisic=vst2d_vssl +def vst2d_vssl : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst2d $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d intrisic=vst2d_vssl +def vst2d_vIsl : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vst2d $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d intrisic=vst2d_vssml +def vst2d_vssml : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst2d $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d intrisic=vst2d_vssml +def vst2d_vIsml : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst2d $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc intrisic=vst2dnc_vssl +def vst2dnc_vssl : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst2d.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc intrisic=vst2dnc_vssl +def vst2dnc_vIsl : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vst2d.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc intrisic=vst2dnc_vssml +def vst2dnc_vssml : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst2d.nc $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc intrisic=vst2dnc_vssml +def vst2dnc_vIsml : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst2d.nc $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.ot intrisic=vst2dot_vssl +def vst2dot_vssl : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst2d.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.ot intrisic=vst2dot_vssl +def vst2dot_vIsl : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, 
I64:$sz, I32:$vl), + "vst2d.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.ot intrisic=vst2dot_vssml +def vst2dot_vssml : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst2d.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.ot intrisic=vst2dot_vssml +def vst2dot_vIsml : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst2d.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc.ot intrisic=vst2dncot_vssl +def vst2dncot_vssl : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vst2d.nc.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc.ot intrisic=vst2dncot_vssl +def vst2dncot_vIsl : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vst2d.nc.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc.ot intrisic=vst2dncot_vssml +def vst2dncot_vssml : RV<0xd1, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vst2d.nc.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VST2D asm=vst2d.nc.ot intrisic=vst2dncot_vssml +def vst2dncot_vIsml : RV<0xd1, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vst2d.nc.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d intrisic=vstu2d_vssl +def vstu2d_vssl : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu2d $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d intrisic=vstu2d_vssl +def vstu2d_vIsl : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu2d $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d intrisic=vstu2d_vssml +def vstu2d_vssml : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu2d $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d intrisic=vstu2d_vssml +def vstu2d_vIsml : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstu2d $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc intrisic=vstu2dnc_vssl +def vstu2dnc_vssl : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu2d.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc intrisic=vstu2dnc_vssl +def vstu2dnc_vIsl : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu2d.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc 
intrisic=vstu2dnc_vssml +def vstu2dnc_vssml : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu2d.nc $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc intrisic=vstu2dnc_vssml +def vstu2dnc_vIsml : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstu2d.nc $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.ot intrisic=vstu2dot_vssl +def vstu2dot_vssl : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu2d.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.ot intrisic=vstu2dot_vssl +def vstu2dot_vIsl : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu2d.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.ot intrisic=vstu2dot_vssml +def vstu2dot_vssml : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu2d.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.ot intrisic=vstu2dot_vssml +def vstu2dot_vIsml : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstu2d.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc.ot intrisic=vstu2dncot_vssl +def vstu2dncot_vssl : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstu2d.nc.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc.ot intrisic=vstu2dncot_vssl +def vstu2dncot_vIsl : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstu2d.nc.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc.ot intrisic=vstu2dncot_vssml +def vstu2dncot_vssml : RV<0xd2, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstu2d.nc.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTU2D asm=vstu2d.nc.ot intrisic=vstu2dncot_vssml +def vstu2dncot_vIsml : RV<0xd2, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstu2d.nc.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d intrisic=vstl2d_vssl +def vstl2d_vssl : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl2d $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d intrisic=vstl2d_vssl +def vstl2d_vIsl : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl2d $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d intrisic=vstl2d_vssml +def vstl2d_vssml : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl2d $vx,$sy,$sz,$vm", [], 
NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d intrisic=vstl2d_vssml +def vstl2d_vIsml : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl2d $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.nc intrisic=vstl2dnc_vssl +def vstl2dnc_vssl : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl2d.nc $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.nc intrisic=vstl2dnc_vssl +def vstl2dnc_vIsl : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl2d.nc $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.nc intrisic=vstl2dnc_vssml +def vstl2dnc_vssml : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl2d.nc $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.nc intrisic=vstl2dnc_vssml +def vstl2dnc_vIsml : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl2d.nc $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.ot intrisic=vstl2dot_vssl +def vstl2dot_vssl : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl2d.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.ot intrisic=vstl2dot_vssl +def vstl2dot_vIsl : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl2d.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.ot intrisic=vstl2dot_vssml +def vstl2dot_vssml : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl2d.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.ot intrisic=vstl2dot_vssml +def vstl2dot_vIsml : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl2d.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.nc.ot intrisic=vstl2dncot_vssl +def vstl2dncot_vssl : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, I32:$vl), + "vstl2d.nc.ot $vx,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.nc.ot intrisic=vstl2dncot_vssl +def vstl2dncot_vIsl : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, I32:$vl), + "vstl2d.nc.ot $vx,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D asm=vstl2d.nc.ot intrisic=vstl2dncot_vssml +def vstl2dncot_vssml : RV<0xd3, (outs ), (ins V64:$vx, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vstl2d.nc.ot $vx,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSTL2D 
asm=vstl2d.nc.ot intrisic=vstl2dncot_vssml +def vstl2dncot_vIsml : RV<0xd3, (outs ), (ins V64:$vx, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vstl2d.nc.ot $vx,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=PFCHV asm=pfchv intrisic=pfchv_ssl +def pfchv_ssl : RV<0x80, (outs ), (ins I64:$sy, I64:$sz, I32:$vl), + "pfchv $sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=PFCHV asm=pfchv intrisic=pfchv_ssl +def pfchv_Isl : RV<0x80, (outs ), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "pfchv $I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=PFCHV asm=pfchv.nc intrisic=pfchvnc_ssl +def pfchvnc_ssl : RV<0x80, (outs ), (ins I64:$sy, I64:$sz, I32:$vl), + "pfchv.nc $sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=PFCHV asm=pfchv.nc intrisic=pfchvnc_ssl +def pfchvnc_Isl : RV<0x80, (outs ), (ins simm7Op64:$I, I64:$sz, I32:$vl), + "pfchv.nc $I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=LVM asm=lvm intrisic=lvm_mmss +def lvm_mmss : RV<0xb7, (outs VM:$vmx), (ins VM:$vmd, I64:$sy, I64:$sz), + "lvm $vmx,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vmx = $vmd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=LVM asm=lvm intrisic=lvm_mmss +def lvm_mmIs : RV<0xb7, (outs VM:$vmx), (ins VM:$vmd, simm7Op64:$N, I64:$sz), + "lvm $vmx,$N,$sz", [], NoItinerary> +{ + let Constraints = "$vmx = $vmd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=LVM asm=lvm intrisic=lvm_MMss +def lvm_MMss : Pseudo<(outs VM512:$vmx), (ins VM512:$vmd, I64:$sy, I64:$sz), + "# lvm $vmx,$sy,$sz", []> +{ + let Constraints = "$vmx = $vmd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=LVM asm=lvm intrisic=lvm_MMss +def lvm_MMIs : Pseudo<(outs VM512:$vmx), (ins VM512:$vmd, simm7Op64:$N, I64:$sz), + "# lvm $vmx,$N,$sz", []> +{ + let Constraints = "$vmx = $vmd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=SVM asm=svm intrisic=svm_sms +def svm_sms : RV<0xa7, (outs I64:$sx), (ins VM:$vmz, I64:$sy), + "svm $sx,$vmz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=SVM asm=svm intrisic=svm_sms +def svm_smI : RV<0xa7, (outs I64:$sx), (ins VM:$vmz, simm7Op64:$N), + "svm $sx,$vmz,$N", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=SVM asm=svm intrisic=svm_sMs +def svm_sMs : Pseudo<(outs I64:$sx), (ins VM512:$vmz, I64:$sy), + "# svm $sx,$vmz,$sy", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=SVM asm=svm intrisic=svm_sMs +def svm_sMI : Pseudo<(outs I64:$sx), (ins VM512:$vmz, simm7Op64:$N), + "# svm $sx,$vmz,$N", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=VBRD asm=vbrd intrisic=vbrdl_vsl +def vbrd_vsl : RV<0x8c, (outs V64:$vx), (ins I64:$sy, I32:$vl), + "vbrd $vx,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrd intrisic=vbrdl_vsvl +def vbrd_vsvl : RV<0x8c, (outs V64:$vx), (ins I64:$sy, V64:$vd, I32:$vl), + "vbrd $vx,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace 
= "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrd intrisic=vbrdl_vsmvl +def vbrd_vsmvl : RV<0x8c, (outs V64:$vx), (ins I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vbrd $vx,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrd intrisic=vbrdl_vsl +def vbrd_vIl : RV<0x8c, (outs V64:$vx), (ins simm7Op64:$I, I32:$vl), + "vbrd $vx,$I", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrd intrisic=vbrdl_vsvl +def vbrd_vIvl : RV<0x8c, (outs V64:$vx), (ins simm7Op64:$I, V64:$vd, I32:$vl), + "vbrd $vx,$I", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrd intrisic=vbrdl_vsmvl +def vbrd_vImvl : RV<0x8c, (outs V64:$vx), (ins simm7Op64:$I, VM:$vm, V64:$vd, I32:$vl), + "vbrd $vx,$I,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdu intrisic=vbrds_vsl +def vbrdu_vsl : RV<0x8c, (outs V64:$vx), (ins F32:$sy, I32:$vl), + "vbrdu $vx,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdu intrisic=vbrds_vsvl +def vbrdu_vsvl : RV<0x8c, (outs V64:$vx), (ins F32:$sy, V64:$vd, I32:$vl), + "vbrdu $vx,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdu intrisic=vbrds_vsmvl +def vbrdu_vsmvl : RV<0x8c, (outs V64:$vx), (ins F32:$sy, VM:$vm, V64:$vd, I32:$vl), + "vbrdu $vx,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdl intrisic=vbrdw_vsl +def vbrdl_vsl : RV<0x8c, (outs V64:$vx), (ins I32:$sy, I32:$vl), + "vbrdl $vx,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdl intrisic=vbrdw_vsvl +def vbrdl_vsvl : RV<0x8c, (outs V64:$vx), (ins I32:$sy, V64:$vd, I32:$vl), + "vbrdl $vx,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdl intrisic=vbrdw_vsmvl +def vbrdl_vsmvl : RV<0x8c, (outs V64:$vx), (ins I32:$sy, VM:$vm, V64:$vd, I32:$vl), + "vbrdl $vx,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdl intrisic=vbrdw_vsl +def vbrdl_vIl : RV<0x8c, (outs V64:$vx), (ins simm7Op32:$I, I32:$vl), + "vbrdl $vx,$I", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdl intrisic=vbrdw_vsvl +def vbrdl_vIvl : RV<0x8c, (outs V64:$vx), (ins simm7Op32:$I, V64:$vd, I32:$vl), + "vbrdl $vx,$I", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=vbrdl intrisic=vbrdw_vsmvl +def vbrdl_vImvl : RV<0x8c, (outs V64:$vx), (ins simm7Op32:$I, VM:$vm, V64:$vd, I32:$vl), + "vbrdl $vx,$I,$vm", [], NoItinerary> +{ 
+ let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=pvbrd intrisic=pvbrd_vsl +def pvbrd_vsl : RV<0x8c, (outs V64:$vx), (ins I64:$sy, I32:$vl), + "pvbrd $vx,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=pvbrd intrisic=pvbrd_vsvl +def pvbrd_vsvl : RV<0x8c, (outs V64:$vx), (ins I64:$sy, V64:$vd, I32:$vl), + "pvbrd $vx,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VBRD asm=pvbrd intrisic=pvbrd_vsMvl +def pvbrd_vsMvl : RV<0x8c, (outs V64:$vx), (ins I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvbrd $vx,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMV asm=vmv intrisic=vmv_vsvl +def vmv_vsvl : RV<0x9c, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmv $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMV asm=vmv intrisic=vmv_vsvvl +def vmv_vsvvl : RV<0x9c, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmv $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMV asm=vmv intrisic=vmv_vsvl +def vmv_vIvl : RV<0x9c, (outs V64:$vx), (ins simm7Op32:$N, V64:$vz, I32:$vl), + "vmv $vx,$N,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMV asm=vmv intrisic=vmv_vsvvl +def vmv_vIvvl : RV<0x9c, (outs V64:$vx), (ins simm7Op32:$N, V64:$vz, V64:$vd, I32:$vl), + "vmv $vx,$N,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMV asm=vmv intrisic=vmv_vsvmvl +def vmv_vsvmvl : RV<0x9c, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmv $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMV asm=vmv intrisic=vmv_vsvmvl +def vmv_vIvmvl : RV<0x9c, (outs V64:$vx), (ins simm7Op32:$N, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmv $vx,$N,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vvvl +def vaddul_vvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vaddu.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vvvvl +def vaddul_vvvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vaddu.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vsvl +def vaddul_vsvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vaddu.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vsvvl +def vaddul_vsvvl : 
RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vaddu.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vsvl +def vaddul_vIvl : RV<0xc8, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vaddu.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vsvvl +def vaddul_vIvvl : RV<0xc8, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vaddu.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vvvmvl +def vaddul_vvvmvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vaddu.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vsvmvl +def vaddul_vsvmvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vaddu.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.l intrisic=vaddul_vsvmvl +def vaddul_vIvmvl : RV<0xc8, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vaddu.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vvvl +def vadduw_vvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vaddu.w $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vvvvl +def vadduw_vvvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vaddu.w $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vsvl +def vadduw_vsvl : RV<0xc8, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vaddu.w $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vsvvl +def vadduw_vsvvl : RV<0xc8, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vaddu.w $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vsvl +def vadduw_vIvl : RV<0xc8, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vaddu.w $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vsvvl +def vadduw_vIvvl : RV<0xc8, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vaddu.w $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vvvmvl +def 
vadduw_vvvmvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vaddu.w $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vsvmvl +def vadduw_vsvmvl : RV<0xc8, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vaddu.w $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=vaddu.w intrisic=vadduw_vsvmvl +def vadduw_vIvmvl : RV<0xc8, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vaddu.w $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=pvaddu intrisic=pvaddu_vvvl +def pvaddu_vvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvaddu $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=pvaddu intrisic=pvaddu_vvvvl +def pvaddu_vvvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvaddu $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=pvaddu intrisic=pvaddu_vsvl +def pvaddu_vsvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvaddu $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=pvaddu intrisic=pvaddu_vsvvl +def pvaddu_vsvvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvaddu $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=pvaddu intrisic=pvaddu_vvvMvl +def pvaddu_vvvMvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvaddu $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADD asm=pvaddu intrisic=pvaddu_vsvMvl +def pvaddu_vsvMvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvaddu $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vvvl +def vaddswsx_vvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vadds.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vvvvl +def vaddswsx_vvvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vadds.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vsvl +def vaddswsx_vsvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vadds.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS 
asm=vadds.w.sx intrisic=vaddswsx_vsvvl +def vaddswsx_vsvvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vadds.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vsvl +def vaddswsx_vIvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vadds.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vsvvl +def vaddswsx_vIvvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vadds.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vvvmvl +def vaddswsx_vvvmvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.w.sx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vsvmvl +def vaddswsx_vsvmvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.w.sx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.sx intrisic=vaddswsx_vsvmvl +def vaddswsx_vIvmvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.w.sx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vvvl +def vaddswzx_vvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vadds.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vvvvl +def vaddswzx_vvvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vadds.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vsvl +def vaddswzx_vsvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vadds.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vsvvl +def vaddswzx_vsvvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vadds.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vsvl +def vaddswzx_vIvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vadds.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vsvvl +def vaddswzx_vIvvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vadds.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let 
Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vvvmvl +def vaddswzx_vvvmvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.w.zx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vsvmvl +def vaddswzx_vsvmvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.w.zx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=vadds.w.zx intrisic=vaddswzx_vsvmvl +def vaddswzx_vIvmvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.w.zx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=pvadds intrisic=pvadds_vvvl +def pvadds_vvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvadds $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=pvadds intrisic=pvadds_vvvvl +def pvadds_vvvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvadds $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=pvadds intrisic=pvadds_vsvl +def pvadds_vsvl : RV<0xca, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvadds $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=pvadds intrisic=pvadds_vsvvl +def pvadds_vsvvl : RV<0xca, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvadds $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=pvadds intrisic=pvadds_vvvMvl +def pvadds_vvvMvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvadds $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADS asm=pvadds intrisic=pvadds_vsvMvl +def pvadds_vsvMvl : RV<0xca, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvadds $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vvvl +def vaddsl_vvvl : RV<0x8b, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vadds.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vvvvl +def vaddsl_vvvvl : RV<0x8b, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vadds.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vsvl +def vaddsl_vsvl : RV<0x8b, (outs V64:$vx), (ins 
I64:$sy, V64:$vz, I32:$vl), + "vadds.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vsvvl +def vaddsl_vsvvl : RV<0x8b, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vadds.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vsvl +def vaddsl_vIvl : RV<0x8b, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vadds.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vsvvl +def vaddsl_vIvvl : RV<0x8b, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vadds.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vvvmvl +def vaddsl_vvvmvl : RV<0x8b, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vsvmvl +def vaddsl_vsvmvl : RV<0x8b, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VADX asm=vadds.l intrisic=vaddsl_vsvmvl +def vaddsl_vIvmvl : RV<0x8b, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vadds.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vvvl +def vsubul_vvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vsubu.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vvvvl +def vsubul_vvvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vsubu.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vsvl +def vsubul_vsvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vsubu.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vsvvl +def vsubul_vsvvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vsubu.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vsvl +def vsubul_vIvl : RV<0xc8, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vsubu.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vsvvl +def vsubul_vIvvl : RV<0xc8, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), 
+ "vsubu.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vvvmvl +def vsubul_vvvmvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubu.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vsvmvl +def vsubul_vsvmvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubu.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.l intrisic=vsubul_vsvmvl +def vsubul_vIvmvl : RV<0xc8, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubu.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vvvl +def vsubuw_vvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vsubu.w $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vvvvl +def vsubuw_vvvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vsubu.w $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vsvl +def vsubuw_vsvl : RV<0xc8, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vsubu.w $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vsvvl +def vsubuw_vsvvl : RV<0xc8, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vsubu.w $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vsvl +def vsubuw_vIvl : RV<0xc8, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vsubu.w $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vsvvl +def vsubuw_vIvvl : RV<0xc8, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vsubu.w $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vvvmvl +def vsubuw_vvvmvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubu.w $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vsvmvl +def vsubuw_vsvmvl : RV<0xc8, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubu.w $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=vsubu.w intrisic=vsubuw_vsvmvl +def vsubuw_vIvmvl 
: RV<0xc8, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubu.w $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=pvsubu intrisic=pvsubu_vvvl +def pvsubu_vvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvsubu $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=pvsubu intrisic=pvsubu_vvvvl +def pvsubu_vvvvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvsubu $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=pvsubu intrisic=pvsubu_vsvl +def pvsubu_vsvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvsubu $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=pvsubu intrisic=pvsubu_vsvvl +def pvsubu_vsvvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvsubu $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=pvsubu intrisic=pvsubu_vvvMvl +def pvsubu_vvvMvl : RV<0xc8, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvsubu $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUB asm=pvsubu intrisic=pvsubu_vsvMvl +def pvsubu_vsvMvl : RV<0xc8, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvsubu $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vvvl +def vsubswsx_vvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vsubs.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vvvvl +def vsubswsx_vvvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vsubs.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vsvl +def vsubswsx_vsvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vsubs.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vsvvl +def vsubswsx_vsvvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vsubs.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vsvl +def vsubswsx_vIvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vsubs.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vsvvl +def vsubswsx_vIvvl : 
RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vsubs.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vvvmvl +def vsubswsx_vvvmvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.w.sx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vsvmvl +def vsubswsx_vsvmvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.w.sx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.sx intrisic=vsubswsx_vsvmvl +def vsubswsx_vIvmvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.w.sx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vvvl +def vsubswzx_vvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vsubs.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vvvvl +def vsubswzx_vvvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vsubs.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vsvl +def vsubswzx_vsvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vsubs.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vsvvl +def vsubswzx_vsvvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vsubs.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vsvl +def vsubswzx_vIvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vsubs.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vsvvl +def vsubswzx_vIvvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vsubs.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vvvmvl +def vsubswzx_vvvmvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.w.zx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vsvmvl +def vsubswzx_vsvmvl : RV<0xca, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.w.zx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let 
Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=vsubs.w.zx intrisic=vsubswzx_vsvmvl +def vsubswzx_vIvmvl : RV<0xca, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.w.zx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=pvsubs intrisic=pvsubs_vvvl +def pvsubs_vvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvsubs $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=pvsubs intrisic=pvsubs_vvvvl +def pvsubs_vvvvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvsubs $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=pvsubs intrisic=pvsubs_vsvl +def pvsubs_vsvl : RV<0xca, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvsubs $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=pvsubs intrisic=pvsubs_vsvvl +def pvsubs_vsvvl : RV<0xca, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvsubs $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=pvsubs intrisic=pvsubs_vvvMvl +def pvsubs_vvvMvl : RV<0xca, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvsubs $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBS asm=pvsubs intrisic=pvsubs_vsvMvl +def pvsubs_vsvMvl : RV<0xca, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvsubs $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vvvl +def vsubsl_vvvl : RV<0x8b, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vsubs.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vvvvl +def vsubsl_vvvvl : RV<0x8b, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vsubs.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vsvl +def vsubsl_vsvl : RV<0x8b, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vsubs.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vsvvl +def vsubsl_vsvvl : RV<0x8b, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vsubs.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vsvl +def vsubsl_vIvl : RV<0x8b, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vsubs.l $vx,$I,$vz", [], NoItinerary> +{ + let 
DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vsvvl +def vsubsl_vIvvl : RV<0x8b, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vsubs.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vvvmvl +def vsubsl_vvvmvl : RV<0x8b, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vsvmvl +def vsubsl_vsvmvl : RV<0x8b, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSBX asm=vsubs.l intrisic=vsubsl_vsvmvl +def vsubsl_vIvmvl : RV<0x8b, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vsubs.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vvvl +def vmulul_vvvl : RV<0xc9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmulu.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vvvvl +def vmulul_vvvvl : RV<0xc9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmulu.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vsvl +def vmulul_vsvl : RV<0xc9, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vmulu.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vsvvl +def vmulul_vsvvl : RV<0xc9, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmulu.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vsvl +def vmulul_vIvl : RV<0xc9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vmulu.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vsvvl +def vmulul_vIvvl : RV<0xc9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vmulu.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vvvmvl +def vmulul_vvvmvl : RV<0xc9, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmulu.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vsvmvl +def vmulul_vsvmvl : RV<0xc9, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmulu.l 
$vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.l intrisic=vmulul_vsvmvl +def vmulul_vIvmvl : RV<0xc9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmulu.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vvvl +def vmuluw_vvvl : RV<0xc9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmulu.w $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vvvvl +def vmuluw_vvvvl : RV<0xc9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmulu.w $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vsvl +def vmuluw_vsvl : RV<0xc9, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmulu.w $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vsvvl +def vmuluw_vsvvl : RV<0xc9, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmulu.w $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vsvl +def vmuluw_vIvl : RV<0xc9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmulu.w $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vsvvl +def vmuluw_vIvvl : RV<0xc9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmulu.w $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vvvmvl +def vmuluw_vvvmvl : RV<0xc9, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmulu.w $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vsvmvl +def vmuluw_vsvmvl : RV<0xc9, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmulu.w $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPY asm=vmulu.w intrisic=vmuluw_vsvmvl +def vmuluw_vIvmvl : RV<0xc9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmulu.w $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vvvl +def vmulswsx_vvvl : RV<0xcb, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmuls.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vvvvl +def vmulswsx_vvvvl : RV<0xcb, (outs V64:$vx), (ins V64:$vy, 
V64:$vz, V64:$vd, I32:$vl), + "vmuls.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vsvl +def vmulswsx_vsvl : RV<0xcb, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmuls.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vsvvl +def vmulswsx_vsvvl : RV<0xcb, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmuls.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vsvl +def vmulswsx_vIvl : RV<0xcb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmuls.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vsvvl +def vmulswsx_vIvvl : RV<0xcb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmuls.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vvvmvl +def vmulswsx_vvvmvl : RV<0xcb, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.w.sx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vsvmvl +def vmulswsx_vsvmvl : RV<0xcb, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.w.sx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.sx intrisic=vmulswsx_vsvmvl +def vmulswsx_vIvmvl : RV<0xcb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.w.sx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vvvl +def vmulswzx_vvvl : RV<0xcb, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmuls.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vvvvl +def vmulswzx_vvvvl : RV<0xcb, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmuls.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vsvl +def vmulswzx_vsvl : RV<0xcb, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmuls.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vsvvl +def vmulswzx_vsvvl : RV<0xcb, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmuls.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} 
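// The defs in this file follow a fixed operand-suffix convention that can be
// read directly off their (ins ...) lists:
//   _vvvl   two V64 vector sources ($vy, $vz) plus the vector length $vl
//   _vsvl   a scalar register source $sy (I32 or I64) in place of $vy
//   _vIvl   a 7-bit immediate source $I (simm7Op32/simm7Op64) in place of $vy
//   ..vvl   an extra V64:$vd pass-through value, tied to the result by
//           Constraints = "$vx = $vd" (merging form)
//   ..mvl   a VM mask operand $vm (VM512 for the packed "pv" forms); these
//           also carry the tied $vd pass-through operand
// A minimal sketch, assuming the RV format class, register classes, and
// NoItinerary used in the defs above, of how one unmasked/merging pair could
// be factored with a multiclass; RVBinSketch and vfoo_sketch are hypothetical
// names used only for illustration, not definitions from this patch:
multiclass RVBinSketch<bits<8> opc, string asmstr> {
  // unmasked form: result $vx, two vector sources, vector length $vl
  def _vvvl : RV<opc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl),
                 asmstr#" $vx,$vy,$vz", [], NoItinerary> {
    let DecoderNamespace = "VEL";
    let isCodeGenOnly = 1;
    let DisableEncoding = "$vl";
  }
  // merging form: extra $vd pass-through tied to the result register
  let Constraints = "$vx = $vd" in
  def _vvvvl : RV<opc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl),
                  asmstr#" $vx,$vy,$vz", [], NoItinerary> {
    let DecoderNamespace = "VEL";
    let isCodeGenOnly = 1;
    let DisableEncoding = "$vl";
  }
}
// "defm vfoo_sketch : RVBinSketch<0xc8, "vfoo">;" would then expand to
// vfoo_sketch_vvvl and vfoo_sketch_vvvvl with the same shape as the defs above.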
+ +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vsvl +def vmulswzx_vIvl : RV<0xcb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmuls.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vsvvl +def vmulswzx_vIvvl : RV<0xcb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmuls.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vvvmvl +def vmulswzx_vvvmvl : RV<0xcb, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.w.zx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vsvmvl +def vmulswzx_vsvmvl : RV<0xcb, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.w.zx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPS asm=vmuls.w.zx intrisic=vmulswzx_vsvmvl +def vmulswzx_vIvmvl : RV<0xcb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.w.zx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vvvl +def vmulsl_vvvl : RV<0xdb, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmuls.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vvvvl +def vmulsl_vvvvl : RV<0xdb, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmuls.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vsvl +def vmulsl_vsvl : RV<0xdb, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vmuls.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vsvvl +def vmulsl_vsvvl : RV<0xdb, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmuls.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vsvl +def vmulsl_vIvl : RV<0xdb, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vmuls.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vsvvl +def vmulsl_vIvvl : RV<0xdb, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vmuls.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vvvmvl +def vmulsl_vvvmvl : RV<0xdb, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace 
= "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vsvmvl +def vmulsl_vsvmvl : RV<0xdb, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPX asm=vmuls.l intrisic=vmulsl_vsvmvl +def vmulsl_vIvmvl : RV<0xdb, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmuls.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPD asm=vmuls.l.w intrisic=vmulslw_vvvl +def vmulslw_vvvl : RV<0xd9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmuls.l.w $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPD asm=vmuls.l.w intrisic=vmulslw_vvvvl +def vmulslw_vvvvl : RV<0xd9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmuls.l.w $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPD asm=vmuls.l.w intrisic=vmulslw_vsvl +def vmulslw_vsvl : RV<0xd9, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmuls.l.w $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPD asm=vmuls.l.w intrisic=vmulslw_vsvvl +def vmulslw_vsvvl : RV<0xd9, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmuls.l.w $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPD asm=vmuls.l.w intrisic=vmulslw_vsvl +def vmulslw_vIvl : RV<0xd9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmuls.l.w $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMPD asm=vmuls.l.w intrisic=vmulslw_vsvvl +def vmulslw_vIvvl : RV<0xd9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmuls.l.w $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvvl +def vdivul_vvvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vdivu.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvvvl +def vdivul_vvvvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vdivu.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vsvl +def vdivul_vsvl : RV<0xe9, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vdivu.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vsvvl +def vdivul_vsvvl : RV<0xe9, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vdivu.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vsvl +def vdivul_vIvl : RV<0xe9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vdivu.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vsvvl +def vdivul_vIvvl : RV<0xe9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vdivu.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvvmvl +def vdivul_vvvmvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivu.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vsvmvl +def vdivul_vsvmvl : RV<0xe9, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivu.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vsvmvl +def vdivul_vIvmvl : RV<0xe9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivu.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvvl +def vdivuw_vvvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vdivu.w $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvvvl +def vdivuw_vvvvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vdivu.w $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vsvl +def vdivuw_vsvl : RV<0xe9, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vdivu.w $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vsvvl +def vdivuw_vsvvl : RV<0xe9, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vdivu.w $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vsvl +def vdivuw_vIvl : RV<0xe9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vdivu.w $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vsvvl +def vdivuw_vIvvl : RV<0xe9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vdivu.w $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvvmvl +def vdivuw_vvvmvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivu.w $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let 
DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vsvmvl +def vdivuw_vsvmvl : RV<0xe9, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivu.w $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vsvmvl +def vdivuw_vIvmvl : RV<0xe9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivu.w $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvsl +def vdivul_vvsl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, I64:$sy, I32:$vl), + "vdivu.l $vx,$vy,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvsvl +def vdivul_vvsvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vd, I32:$vl), + "vdivu.l $vx,$vy,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvsl +def vdivul_vvIl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I32:$vl), + "vdivu.l $vx,$vy,$I", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvsvl +def vdivul_vvIvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, V64:$vd, I32:$vl), + "vdivu.l $vx,$vy,$I", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvsmvl +def vdivul_vvsmvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vdivu.l $vx,$vy,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.l intrisic=vdivul_vvsmvl +def vdivul_vvImvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, VM:$vm, V64:$vd, I32:$vl), + "vdivu.l $vx,$vy,$I,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvsl +def vdivuw_vvsl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, I32:$sy, I32:$vl), + "vdivu.w $vx,$vy,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvsvl +def vdivuw_vvsvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, I32:$sy, V64:$vd, I32:$vl), + "vdivu.w $vx,$vy,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvsl +def vdivuw_vvIl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, I32:$vl), + "vdivu.w $vx,$vy,$I", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvsvl +def vdivuw_vvIvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, V64:$vd, I32:$vl), + "vdivu.w $vx,$vy,$I", [], NoItinerary> +{ + let Constraints 
= "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvsmvl +def vdivuw_vvsmvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, I32:$sy, VM:$vm, V64:$vd, I32:$vl), + "vdivu.w $vx,$vy,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDIV asm=vdivu.w intrisic=vdivuw_vvsmvl +def vdivuw_vvImvl : RV<0xe9, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, VM:$vm, V64:$vd, I32:$vl), + "vdivu.w $vx,$vy,$I,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvvl +def vdivswsx_vvvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vdivs.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvvvl +def vdivswsx_vvvvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vsvl +def vdivswsx_vsvl : RV<0xeb, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vdivs.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vsvvl +def vdivswsx_vsvvl : RV<0xeb, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vsvl +def vdivswsx_vIvl : RV<0xeb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vdivs.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vsvvl +def vdivswsx_vIvvl : RV<0xeb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvvmvl +def vdivswsx_vvvmvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vsvmvl +def vdivswsx_vsvmvl : RV<0xeb, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vsvmvl +def vdivswsx_vIvmvl : RV<0xeb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx 
intrisic=vdivswzx_vvvl +def vdivswzx_vvvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vdivs.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvvvl +def vdivswzx_vvvvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vsvl +def vdivswzx_vsvl : RV<0xeb, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vdivs.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vsvvl +def vdivswzx_vsvvl : RV<0xeb, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vsvl +def vdivswzx_vIvl : RV<0xeb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vdivs.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vsvvl +def vdivswzx_vIvvl : RV<0xeb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvvmvl +def vdivswzx_vvvmvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vsvmvl +def vdivswzx_vsvmvl : RV<0xeb, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vsvmvl +def vdivswzx_vIvmvl : RV<0xeb, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvsl +def vdivswsx_vvsl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I32:$sy, I32:$vl), + "vdivs.w.sx $vx,$vy,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvsvl +def vdivswsx_vvsvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I32:$sy, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$vy,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvsl +def vdivswsx_vvIl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, I32:$vl), + "vdivs.w.sx $vx,$vy,$I", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvsvl +def vdivswsx_vvIvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$vy,$I", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvsmvl +def vdivswsx_vvsmvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I32:$sy, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$vy,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.sx intrisic=vdivswsx_vvsmvl +def vdivswsx_vvImvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.sx $vx,$vy,$I,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvsl +def vdivswzx_vvsl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I32:$sy, I32:$vl), + "vdivs.w.zx $vx,$vy,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvsvl +def vdivswzx_vvsvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I32:$sy, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$vy,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvsl +def vdivswzx_vvIl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, I32:$vl), + "vdivs.w.zx $vx,$vy,$I", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvsvl +def vdivswzx_vvIvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$vy,$I", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvsmvl +def vdivswzx_vvsmvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I32:$sy, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$vy,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVS asm=vdivs.w.zx intrisic=vdivswzx_vvsmvl +def vdivswzx_vvImvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op32:$I, VM:$vm, V64:$vd, I32:$vl), + "vdivs.w.zx $vx,$vy,$I,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvvl +def vdivsl_vvvl : RV<0xfb, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vdivs.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvvvl +def vdivsl_vvvvl : RV<0xfb, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vdivs.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vsvl +def vdivsl_vsvl : RV<0xfb, (outs V64:$vx), (ins 
I64:$sy, V64:$vz, I32:$vl), + "vdivs.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vsvvl +def vdivsl_vsvvl : RV<0xfb, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vdivs.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vsvl +def vdivsl_vIvl : RV<0xfb, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vdivs.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vsvvl +def vdivsl_vIvvl : RV<0xfb, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vdivs.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvvmvl +def vdivsl_vvvmvl : RV<0xfb, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vsvmvl +def vdivsl_vsvmvl : RV<0xfb, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vsvmvl +def vdivsl_vIvmvl : RV<0xfb, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vdivs.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvsl +def vdivsl_vvsl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I64:$sy, I32:$vl), + "vdivs.l $vx,$vy,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvsvl +def vdivsl_vvsvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vd, I32:$vl), + "vdivs.l $vx,$vy,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvsl +def vdivsl_vvIl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I32:$vl), + "vdivs.l $vx,$vy,$I", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvsvl +def vdivsl_vvIvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, V64:$vd, I32:$vl), + "vdivs.l $vx,$vy,$I", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvsmvl +def vdivsl_vvsmvl : RV<0xeb, (outs V64:$vx), (ins V64:$vy, I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vdivs.l $vx,$vy,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VDVX asm=vdivs.l intrisic=vdivsl_vvsmvl +def vdivsl_vvImvl : RV<0xeb, 
(outs V64:$vx), (ins V64:$vy, simm7Op64:$I, VM:$vm, V64:$vd, I32:$vl), + "vdivs.l $vx,$vy,$I,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vvvl +def vcmpul_vvvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vcmpu.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vvvvl +def vcmpul_vvvvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vcmpu.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vsvl +def vcmpul_vsvl : RV<0xb9, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vcmpu.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vsvvl +def vcmpul_vsvvl : RV<0xb9, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vcmpu.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vsvl +def vcmpul_vIvl : RV<0xb9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vcmpu.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vsvvl +def vcmpul_vIvvl : RV<0xb9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vcmpu.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vvvmvl +def vcmpul_vvvmvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmpu.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vsvmvl +def vcmpul_vsvmvl : RV<0xb9, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmpu.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.l intrisic=vcmpul_vsvmvl +def vcmpul_vIvmvl : RV<0xb9, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmpu.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vvvl +def vcmpuw_vvvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vcmpu.w $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vvvvl +def vcmpuw_vvvvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vcmpu.w $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vsvl +def vcmpuw_vsvl : 
RV<0xb9, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vcmpu.w $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vsvvl +def vcmpuw_vsvvl : RV<0xb9, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vcmpu.w $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vsvl +def vcmpuw_vIvl : RV<0xb9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vcmpu.w $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vsvvl +def vcmpuw_vIvvl : RV<0xb9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vcmpu.w $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vvvmvl +def vcmpuw_vvvmvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmpu.w $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vsvmvl +def vcmpuw_vsvmvl : RV<0xb9, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmpu.w $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=vcmpu.w intrisic=vcmpuw_vsvmvl +def vcmpuw_vIvmvl : RV<0xb9, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmpu.w $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=pvcmpu intrisic=pvcmpu_vvvl +def pvcmpu_vvvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvcmpu $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=pvcmpu intrisic=pvcmpu_vvvvl +def pvcmpu_vvvvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvcmpu $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=pvcmpu intrisic=pvcmpu_vsvl +def pvcmpu_vsvl : RV<0xb9, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvcmpu $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=pvcmpu intrisic=pvcmpu_vsvvl +def pvcmpu_vsvvl : RV<0xb9, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvcmpu $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=pvcmpu intrisic=pvcmpu_vvvMvl +def pvcmpu_vvvMvl : RV<0xb9, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvcmpu $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMP asm=pvcmpu intrisic=pvcmpu_vsvMvl +def 
pvcmpu_vsvMvl : RV<0xb9, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvcmpu $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vvvl +def vcmpswsx_vvvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vcmps.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vvvvl +def vcmpswsx_vvvvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vcmps.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vsvl +def vcmpswsx_vsvl : RV<0xfa, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vcmps.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vsvvl +def vcmpswsx_vsvvl : RV<0xfa, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vcmps.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vsvl +def vcmpswsx_vIvl : RV<0xfa, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vcmps.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vsvvl +def vcmpswsx_vIvvl : RV<0xfa, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vcmps.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vvvmvl +def vcmpswsx_vvvmvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.w.sx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vsvmvl +def vcmpswsx_vsvmvl : RV<0xfa, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.w.sx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.sx intrisic=vcmpswsx_vsvmvl +def vcmpswsx_vIvmvl : RV<0xfa, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.w.sx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vvvl +def vcmpswzx_vvvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vcmps.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vvvvl +def vcmpswzx_vvvvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vcmps.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = 
"VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vsvl +def vcmpswzx_vsvl : RV<0xfa, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vcmps.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vsvvl +def vcmpswzx_vsvvl : RV<0xfa, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vcmps.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vsvl +def vcmpswzx_vIvl : RV<0xfa, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vcmps.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vsvvl +def vcmpswzx_vIvvl : RV<0xfa, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vcmps.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vvvmvl +def vcmpswzx_vvvmvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.w.zx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vsvmvl +def vcmpswzx_vsvmvl : RV<0xfa, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.w.zx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=vcmps.w.zx intrisic=vcmpswzx_vsvmvl +def vcmpswzx_vIvmvl : RV<0xfa, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.w.zx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=pvcmps intrisic=pvcmps_vvvl +def pvcmps_vvvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvcmps $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=pvcmps intrisic=pvcmps_vvvvl +def pvcmps_vvvvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvcmps $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=pvcmps intrisic=pvcmps_vsvl +def pvcmps_vsvl : RV<0xfa, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvcmps $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=pvcmps intrisic=pvcmps_vsvvl +def pvcmps_vsvvl : RV<0xfa, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvcmps $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=pvcmps intrisic=pvcmps_vvvMvl +def pvcmps_vvvMvl : RV<0xfa, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvcmps $vx,$vy,$vz,$vm", [], 
NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPS asm=pvcmps intrisic=pvcmps_vsvMvl +def pvcmps_vsvMvl : RV<0xfa, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvcmps $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vvvl +def vcmpsl_vvvl : RV<0xba, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vcmps.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vvvvl +def vcmpsl_vvvvl : RV<0xba, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vcmps.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vsvl +def vcmpsl_vsvl : RV<0xba, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vcmps.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vsvvl +def vcmpsl_vsvvl : RV<0xba, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vcmps.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vsvl +def vcmpsl_vIvl : RV<0xba, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vcmps.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vsvvl +def vcmpsl_vIvvl : RV<0xba, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vcmps.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vvvmvl +def vcmpsl_vvvmvl : RV<0xba, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vsvmvl +def vcmpsl_vsvmvl : RV<0xba, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCPX asm=vcmps.l intrisic=vcmpsl_vsvmvl +def vcmpsl_vIvmvl : RV<0xba, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcmps.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vvvl +def vmaxswsx_vvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmaxs.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vvvvl +def vmaxswsx_vvvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + 
"vmaxs.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vsvl +def vmaxswsx_vsvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmaxs.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vsvvl +def vmaxswsx_vsvvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vsvl +def vmaxswsx_vIvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmaxs.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vsvvl +def vmaxswsx_vIvvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vvvmvl +def vmaxswsx_vvvmvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.w.sx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vsvmvl +def vmaxswsx_vsvmvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.w.sx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.sx intrisic=vmaxswsx_vsvmvl +def vmaxswsx_vIvmvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.w.sx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vvvl +def vmaxswzx_vvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmaxs.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vvvvl +def vmaxswzx_vvvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vsvl +def vmaxswzx_vsvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmaxs.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vsvvl +def vmaxswzx_vsvvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx 
intrisic=vmaxswzx_vsvl +def vmaxswzx_vIvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmaxs.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vsvvl +def vmaxswzx_vIvvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vvvmvl +def vmaxswzx_vvvmvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.w.zx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vsvmvl +def vmaxswzx_vsvmvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.w.zx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmaxs.w.zx intrisic=vmaxswzx_vsvmvl +def vmaxswzx_vIvmvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.w.zx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmaxs intrisic=pvmaxs_vvvl +def pvmaxs_vvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvmaxs $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmaxs intrisic=pvmaxs_vvvvl +def pvmaxs_vvvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvmaxs $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmaxs intrisic=pvmaxs_vsvl +def pvmaxs_vsvl : RV<0x8a, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvmaxs $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmaxs intrisic=pvmaxs_vsvvl +def pvmaxs_vsvvl : RV<0x8a, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvmaxs $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmaxs intrisic=pvmaxs_vvvMvl +def pvmaxs_vvvMvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvmaxs $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmaxs intrisic=pvmaxs_vsvMvl +def pvmaxs_vsvMvl : RV<0x8a, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvmaxs $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vvvl +def vminswsx_vvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmins.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vvvvl +def vminswsx_vvvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmins.w.sx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vsvl +def vminswsx_vsvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmins.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vsvvl +def vminswsx_vsvvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmins.w.sx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vsvl +def vminswsx_vIvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmins.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vsvvl +def vminswsx_vIvvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmins.w.sx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vvvmvl +def vminswsx_vvvmvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.w.sx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vsvmvl +def vminswsx_vsvmvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.w.sx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.sx intrisic=vminswsx_vsvmvl +def vminswsx_vIvmvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.w.sx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vvvl +def vminswzx_vvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmins.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vvvvl +def vminswzx_vvvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmins.w.zx $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vsvl +def vminswzx_vsvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, I32:$vl), + "vmins.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vsvvl +def vminswzx_vsvvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, V64:$vd, I32:$vl), + 
"vmins.w.zx $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vsvl +def vminswzx_vIvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, I32:$vl), + "vmins.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vsvvl +def vminswzx_vIvvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, V64:$vd, I32:$vl), + "vmins.w.zx $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vvvmvl +def vminswzx_vvvmvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.w.zx $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vsvmvl +def vminswzx_vsvmvl : RV<0x8a, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.w.zx $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=vmins.w.zx intrisic=vminswzx_vsvmvl +def vminswzx_vIvmvl : RV<0x8a, (outs V64:$vx), (ins simm7Op32:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.w.zx $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmins intrisic=pvmins_vvvl +def pvmins_vvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvmins $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmins intrisic=pvmins_vvvvl +def pvmins_vvvvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvmins $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmins intrisic=pvmins_vsvl +def pvmins_vsvl : RV<0x8a, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvmins $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmins intrisic=pvmins_vsvvl +def pvmins_vsvvl : RV<0x8a, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvmins $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmins intrisic=pvmins_vvvMvl +def pvmins_vvvMvl : RV<0x8a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvmins $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMS asm=pvmins intrisic=pvmins_vsvMvl +def pvmins_vsvMvl : RV<0x8a, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvmins $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX 
asm=vmaxs.l intrisic=vmaxsl_vvvl +def vmaxsl_vvvl : RV<0x9a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmaxs.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vvvvl +def vmaxsl_vvvvl : RV<0x9a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vsvl +def vmaxsl_vsvl : RV<0x9a, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vmaxs.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vsvvl +def vmaxsl_vsvvl : RV<0x9a, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vsvl +def vmaxsl_vIvl : RV<0x9a, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vmaxs.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vsvvl +def vmaxsl_vIvvl : RV<0x9a, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vmaxs.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vvvmvl +def vmaxsl_vvvmvl : RV<0x9a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vsvmvl +def vmaxsl_vsvmvl : RV<0x9a, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmaxs.l intrisic=vmaxsl_vsvmvl +def vmaxsl_vIvmvl : RV<0x9a, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmaxs.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vvvl +def vminsl_vvvl : RV<0x9a, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vmins.l $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vvvvl +def vminsl_vvvvl : RV<0x9a, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vmins.l $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vsvl +def vminsl_vsvl : RV<0x9a, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vmins.l $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vsvvl +def 
vminsl_vsvvl : RV<0x9a, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vmins.l $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vsvl +def vminsl_vIvl : RV<0x9a, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, I32:$vl), + "vmins.l $vx,$I,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vsvvl +def vminsl_vIvvl : RV<0x9a, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, V64:$vd, I32:$vl), + "vmins.l $vx,$I,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vvvmvl +def vminsl_vvvmvl : RV<0x9a, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.l $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vsvmvl +def vminsl_vsvmvl : RV<0x9a, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.l $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCMX asm=vmins.l intrisic=vminsl_vsvmvl +def vminsl_vIvmvl : RV<0x9a, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmins.l $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=vand intrisic=vand_vvvl +def vand_vvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vand $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=vand intrisic=vand_vvvvl +def vand_vvvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vand $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=vand intrisic=vand_vsvl +def vand_vsvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vand $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=vand intrisic=vand_vsvvl +def vand_vsvvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vand $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=vand intrisic=vand_vvvmvl +def vand_vvvmvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vand $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=vand intrisic=vand_vsvmvl +def vand_vsvmvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vand $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.lo 
intrisic=pvandlo_vvvl +def pvandlo_vvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvand.lo $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.lo intrisic=pvandlo_vvvvl +def pvandlo_vvvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvand.lo $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.lo intrisic=pvandlo_vsvl +def pvandlo_vsvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvand.lo $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.lo intrisic=pvandlo_vsvvl +def pvandlo_vsvvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvand.lo $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.lo intrisic=pvandlo_vvvMvl +def pvandlo_vvvMvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvand.lo $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.lo intrisic=pvandlo_vsvMvl +def pvandlo_vsvMvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvand.lo $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.up intrisic=pvandup_vvvl +def pvandup_vvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvand.up $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.up intrisic=pvandup_vvvvl +def pvandup_vvvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvand.up $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.up intrisic=pvandup_vsvl +def pvandup_vsvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvand.up $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.up intrisic=pvandup_vsvvl +def pvandup_vsvvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvand.up $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.up intrisic=pvandup_vvvMvl +def pvandup_vvvMvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvand.up $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand.up intrisic=pvandup_vsvMvl +def pvandup_vsvMvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvand.up $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let 
DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand intrisic=pvand_vvvl +def pvand_vvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvand $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand intrisic=pvand_vvvvl +def pvand_vvvvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvand $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand intrisic=pvand_vsvl +def pvand_vsvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvand $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand intrisic=pvand_vsvvl +def pvand_vsvvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvand $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand intrisic=pvand_vvvMvl +def pvand_vvvMvl : RV<0xc4, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvand $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VAND asm=pvand intrisic=pvand_vsvMvl +def pvand_vsvMvl : RV<0xc4, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvand $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=vor intrisic=vor_vvvl +def vor_vvvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vor $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=vor intrisic=vor_vvvvl +def vor_vvvvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vor $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=vor intrisic=vor_vsvl +def vor_vsvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vor $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=vor intrisic=vor_vsvvl +def vor_vsvvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vor $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=vor intrisic=vor_vvvmvl +def vor_vvvmvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vor $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=vor intrisic=vor_vsvmvl +def vor_vsvmvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vor $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.lo intrisic=pvorlo_vvvl +def pvorlo_vvvl : RV<0xc5, (outs V64:$vx), 
(ins V64:$vy, V64:$vz, I32:$vl), + "pvor.lo $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.lo intrisic=pvorlo_vvvvl +def pvorlo_vvvvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvor.lo $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.lo intrisic=pvorlo_vsvl +def pvorlo_vsvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvor.lo $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.lo intrisic=pvorlo_vsvvl +def pvorlo_vsvvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvor.lo $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.lo intrisic=pvorlo_vvvMvl +def pvorlo_vvvMvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvor.lo $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.lo intrisic=pvorlo_vsvMvl +def pvorlo_vsvMvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvor.lo $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.up intrisic=pvorup_vvvl +def pvorup_vvvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvor.up $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.up intrisic=pvorup_vvvvl +def pvorup_vvvvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvor.up $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.up intrisic=pvorup_vsvl +def pvorup_vsvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvor.up $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.up intrisic=pvorup_vsvvl +def pvorup_vsvvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvor.up $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.up intrisic=pvorup_vvvMvl +def pvorup_vvvMvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvor.up $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor.up intrisic=pvorup_vsvMvl +def pvorup_vsvMvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvor.up $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor intrisic=pvor_vvvl +def pvor_vvvl : RV<0xc5, (outs V64:$vx), (ins 
V64:$vy, V64:$vz, I32:$vl), + "pvor $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor intrisic=pvor_vvvvl +def pvor_vvvvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvor $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor intrisic=pvor_vsvl +def pvor_vsvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvor $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor intrisic=pvor_vsvvl +def pvor_vsvvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvor $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor intrisic=pvor_vvvMvl +def pvor_vvvMvl : RV<0xc5, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvor $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VOR asm=pvor intrisic=pvor_vsvMvl +def pvor_vsvMvl : RV<0xc5, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvor $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=vxor intrisic=vxor_vvvl +def vxor_vvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vxor $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=vxor intrisic=vxor_vvvvl +def vxor_vvvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vxor $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=vxor intrisic=vxor_vsvl +def vxor_vsvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vxor $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=vxor intrisic=vxor_vsvvl +def vxor_vsvvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vxor $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=vxor intrisic=vxor_vvvmvl +def vxor_vvvmvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vxor $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=vxor intrisic=vxor_vsvmvl +def vxor_vsvmvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vxor $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.lo intrisic=pvxorlo_vvvl +def pvxorlo_vvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvxor.lo $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.lo intrisic=pvxorlo_vvvvl +def pvxorlo_vvvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvxor.lo $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.lo intrisic=pvxorlo_vsvl +def pvxorlo_vsvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvxor.lo $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.lo intrisic=pvxorlo_vsvvl +def pvxorlo_vsvvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvxor.lo $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.lo intrisic=pvxorlo_vvvMvl +def pvxorlo_vvvMvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvxor.lo $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.lo intrisic=pvxorlo_vsvMvl +def pvxorlo_vsvMvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvxor.lo $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.up intrisic=pvxorup_vvvl +def pvxorup_vvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvxor.up $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.up intrisic=pvxorup_vvvvl +def pvxorup_vvvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvxor.up $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.up intrisic=pvxorup_vsvl +def pvxorup_vsvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvxor.up $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.up intrisic=pvxorup_vsvvl +def pvxorup_vsvvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvxor.up $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.up intrisic=pvxorup_vvvMvl +def pvxorup_vvvMvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvxor.up $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor.up intrisic=pvxorup_vsvMvl +def pvxorup_vsvMvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvxor.up $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor intrisic=pvxor_vvvl +def pvxor_vvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvxor $vx,$vy,$vz", [], 
NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor intrisic=pvxor_vvvvl +def pvxor_vvvvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvxor $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor intrisic=pvxor_vsvl +def pvxor_vsvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvxor $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor intrisic=pvxor_vsvvl +def pvxor_vsvvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvxor $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor intrisic=pvxor_vvvMvl +def pvxor_vvvMvl : RV<0xc6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvxor $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VXOR asm=pvxor intrisic=pvxor_vsvMvl +def pvxor_vsvMvl : RV<0xc6, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvxor $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=veqv intrisic=veqv_vvvl +def veqv_vvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "veqv $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=veqv intrisic=veqv_vvvvl +def veqv_vvvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "veqv $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=veqv intrisic=veqv_vsvl +def veqv_vsvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "veqv $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=veqv intrisic=veqv_vsvvl +def veqv_vsvvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "veqv $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=veqv intrisic=veqv_vvvmvl +def veqv_vvvmvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "veqv $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=veqv intrisic=veqv_vsvmvl +def veqv_vsvmvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "veqv $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.lo intrisic=pveqvlo_vvvl +def pveqvlo_vvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pveqv.lo $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let 
DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.lo intrisic=pveqvlo_vvvvl +def pveqvlo_vvvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pveqv.lo $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.lo intrisic=pveqvlo_vsvl +def pveqvlo_vsvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pveqv.lo $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.lo intrisic=pveqvlo_vsvvl +def pveqvlo_vsvvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pveqv.lo $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.lo intrisic=pveqvlo_vvvMvl +def pveqvlo_vvvMvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pveqv.lo $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.lo intrisic=pveqvlo_vsvMvl +def pveqvlo_vsvMvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pveqv.lo $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.up intrisic=pveqvup_vvvl +def pveqvup_vvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pveqv.up $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.up intrisic=pveqvup_vvvvl +def pveqvup_vvvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pveqv.up $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.up intrisic=pveqvup_vsvl +def pveqvup_vsvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pveqv.up $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.up intrisic=pveqvup_vsvvl +def pveqvup_vsvvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pveqv.up $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.up intrisic=pveqvup_vvvMvl +def pveqvup_vvvMvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pveqv.up $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv.up intrisic=pveqvup_vsvMvl +def pveqvup_vsvMvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pveqv.up $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv intrisic=pveqv_vvvl +def pveqv_vvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pveqv $vx,$vy,$vz", [], NoItinerary> +{ + let 
DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv intrisic=pveqv_vvvvl +def pveqv_vvvvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pveqv $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv intrisic=pveqv_vsvl +def pveqv_vsvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pveqv $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv intrisic=pveqv_vsvvl +def pveqv_vsvvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pveqv $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv intrisic=pveqv_vvvMvl +def pveqv_vvvMvl : RV<0xc7, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pveqv $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEQV asm=pveqv intrisic=pveqv_vsvMvl +def pveqv_vsvMvl : RV<0xc7, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pveqv $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=vseq intrisic=vseq_vl +def vseq_vl : RV<0x99, (outs V64:$vx), (ins I32:$vl), + "vseq $vx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=vseq intrisic=vseq_vvl +def vseq_vvl : RV<0x99, (outs V64:$vx), (ins V64:$vd, I32:$vl), + "vseq $vx", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=pvseq.lo intrisic=pvseqlo_vl +def pvseqlo_vl : RV<0x99, (outs V64:$vx), (ins I32:$vl), + "pvseq.lo $vx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=pvseq.lo intrisic=pvseqlo_vvl +def pvseqlo_vvl : RV<0x99, (outs V64:$vx), (ins V64:$vd, I32:$vl), + "pvseq.lo $vx", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=pvseq.up intrisic=pvsequp_vl +def pvsequp_vl : RV<0x99, (outs V64:$vx), (ins I32:$vl), + "pvseq.up $vx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=pvseq.up intrisic=pvsequp_vvl +def pvsequp_vvl : RV<0x99, (outs V64:$vx), (ins V64:$vd, I32:$vl), + "pvseq.up $vx", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=pvseq intrisic=pvseq_vl +def pvseq_vl : RV<0x99, (outs V64:$vx), (ins I32:$vl), + "pvseq $vx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSEQ asm=pvseq intrisic=pvseq_vvl +def pvseq_vvl : RV<0x99, (outs V64:$vx), (ins V64:$vd, I32:$vl), + "pvseq $vx", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 
1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvvl +def vsll_vvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "vsll $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvvvl +def vsll_vvvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "vsll $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvsl +def vsll_vvsl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "vsll $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvsvl +def vsll_vvsvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "vsll $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvsl +def vsll_vvIl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, I32:$vl), + "vsll $vx,$vz,$N", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvsvl +def vsll_vvIvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, V64:$vd, I32:$vl), + "vsll $vx,$vz,$N", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvvmvl +def vsll_vvvmvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vsll $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvsmvl +def vsll_vvsmvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vsll $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=vsll intrisic=vsll_vvsmvl +def vsll_vvImvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, VM:$vm, V64:$vd, I32:$vl), + "vsll $vx,$vz,$N,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.lo intrisic=pvslllo_vvvl +def pvslllo_vvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsll.lo $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.lo intrisic=pvslllo_vvvvl +def pvslllo_vvvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsll.lo $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.lo intrisic=pvslllo_vvsl +def pvslllo_vvsl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsll.lo $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.lo intrisic=pvslllo_vvsvl +def pvslllo_vvsvl : RV<0xe5, (outs 
V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsll.lo $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.lo intrisic=pvslllo_vvvMvl +def pvslllo_vvvMvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsll.lo $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.lo intrisic=pvslllo_vvsMvl +def pvslllo_vvsMvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsll.lo $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.up intrisic=pvsllup_vvvl +def pvsllup_vvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsll.up $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.up intrisic=pvsllup_vvvvl +def pvsllup_vvvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsll.up $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.up intrisic=pvsllup_vvsl +def pvsllup_vvsl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsll.up $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.up intrisic=pvsllup_vvsvl +def pvsllup_vvsvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsll.up $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.up intrisic=pvsllup_vvvMvl +def pvsllup_vvvMvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsll.up $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll.up intrisic=pvsllup_vvsMvl +def pvsllup_vvsMvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsll.up $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll intrisic=pvsll_vvvl +def pvsll_vvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsll $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll intrisic=pvsll_vvvvl +def pvsll_vvvvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsll $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll intrisic=pvsll_vvsl +def pvsll_vvsl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsll $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll intrisic=pvsll_vvsvl +def 
pvsll_vvsvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsll $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll intrisic=pvsll_vvvMvl +def pvsll_vvvMvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsll $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLL asm=pvsll intrisic=pvsll_vvsMvl +def pvsll_vvsMvl : RV<0xe5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsll $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvvl +def vsrl_vvvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "vsrl $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvvvl +def vsrl_vvvvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "vsrl $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvsl +def vsrl_vvsl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "vsrl $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvsvl +def vsrl_vvsvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "vsrl $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvsl +def vsrl_vvIl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, I32:$vl), + "vsrl $vx,$vz,$N", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvsvl +def vsrl_vvIvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, V64:$vd, I32:$vl), + "vsrl $vx,$vz,$N", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvvmvl +def vsrl_vvvmvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vsrl $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvsmvl +def vsrl_vvsmvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vsrl $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=vsrl intrisic=vsrl_vvsmvl +def vsrl_vvImvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, VM:$vm, V64:$vd, I32:$vl), + "vsrl $vx,$vz,$N,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.lo intrisic=pvsrllo_vvvl +def pvsrllo_vvvl : RV<0xf5, (outs 
V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsrl.lo $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.lo intrisic=pvsrllo_vvvvl +def pvsrllo_vvvvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsrl.lo $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.lo intrisic=pvsrllo_vvsl +def pvsrllo_vvsl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsrl.lo $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.lo intrisic=pvsrllo_vvsvl +def pvsrllo_vvsvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsrl.lo $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.lo intrisic=pvsrllo_vvvMvl +def pvsrllo_vvvMvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsrl.lo $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.lo intrisic=pvsrllo_vvsMvl +def pvsrllo_vvsMvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsrl.lo $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.up intrisic=pvsrlup_vvvl +def pvsrlup_vvvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsrl.up $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.up intrisic=pvsrlup_vvvvl +def pvsrlup_vvvvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsrl.up $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.up intrisic=pvsrlup_vvsl +def pvsrlup_vvsl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsrl.up $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.up intrisic=pvsrlup_vvsvl +def pvsrlup_vvsvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsrl.up $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.up intrisic=pvsrlup_vvvMvl +def pvsrlup_vvvMvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsrl.up $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl.up intrisic=pvsrlup_vvsMvl +def pvsrlup_vvsMvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsrl.up $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl 
intrisic=pvsrl_vvvl +def pvsrl_vvvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsrl $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl intrisic=pvsrl_vvvvl +def pvsrl_vvvvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsrl $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl intrisic=pvsrl_vvsl +def pvsrl_vvsl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsrl $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl intrisic=pvsrl_vvsvl +def pvsrl_vvsvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsrl $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl intrisic=pvsrl_vvvMvl +def pvsrl_vvvMvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsrl $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRL asm=pvsrl intrisic=pvsrl_vvsMvl +def pvsrl_vvsMvl : RV<0xf5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsrl $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvvl +def vslaw_vvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "vsla.w $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvvvl +def vslaw_vvvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "vsla.w $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvsl +def vslaw_vvsl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "vsla.w $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvsvl +def vslaw_vvsvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "vsla.w $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvsl +def vslaw_vvIl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, I32:$vl), + "vsla.w $vx,$vz,$N", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvsvl +def vslaw_vvIvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, V64:$vd, I32:$vl), + "vsla.w $vx,$vz,$N", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvvmvl +def vslaw_vvvmvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM:$vm, V64:$vd, I32:$vl), + 
"vsla.w $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvsmvl +def vslaw_vvsmvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vsla.w $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=vsla.w intrisic=vslaw_vvsmvl +def vslaw_vvImvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, VM:$vm, V64:$vd, I32:$vl), + "vsla.w $vx,$vz,$N,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.lo intrisic=pvslalo_vvvl +def pvslalo_vvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsla.lo $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.lo intrisic=pvslalo_vvvvl +def pvslalo_vvvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsla.lo $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.lo intrisic=pvslalo_vvsl +def pvslalo_vvsl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsla.lo $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.lo intrisic=pvslalo_vvsvl +def pvslalo_vvsvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsla.lo $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.lo intrisic=pvslalo_vvvMvl +def pvslalo_vvvMvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsla.lo $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.lo intrisic=pvslalo_vvsMvl +def pvslalo_vvsMvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsla.lo $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.up intrisic=pvslaup_vvvl +def pvslaup_vvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsla.up $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.up intrisic=pvslaup_vvvvl +def pvslaup_vvvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsla.up $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.up intrisic=pvslaup_vvsl +def pvslaup_vvsl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsla.up $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.up intrisic=pvslaup_vvsvl +def pvslaup_vvsvl : RV<0xe6, (outs V64:$vx), 
(ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsla.up $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.up intrisic=pvslaup_vvvMvl +def pvslaup_vvvMvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsla.up $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla.up intrisic=pvslaup_vvsMvl +def pvslaup_vvsMvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsla.up $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla intrisic=pvsla_vvvl +def pvsla_vvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsla $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla intrisic=pvsla_vvvvl +def pvsla_vvvvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsla $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla intrisic=pvsla_vvsl +def pvsla_vvsl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsla $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla intrisic=pvsla_vvsvl +def pvsla_vvsvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsla $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla intrisic=pvsla_vvvMvl +def pvsla_vvvMvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsla $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLA asm=pvsla intrisic=pvsla_vvsMvl +def pvsla_vvsMvl : RV<0xe6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsla $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvvl +def vslal_vvvl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "vsla.l $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvvvl +def vslal_vvvvl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "vsla.l $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvsl +def vslal_vvsl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "vsla.l $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvsvl +def vslal_vvsvl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, I64:$sy, 
V64:$vd, I32:$vl), + "vsla.l $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvsl +def vslal_vvIl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, I32:$vl), + "vsla.l $vx,$vz,$N", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvsvl +def vslal_vvIvl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, V64:$vd, I32:$vl), + "vsla.l $vx,$vz,$N", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvvmvl +def vslal_vvvmvl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vsla.l $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvsmvl +def vslal_vvsmvl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vsla.l $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSLAX asm=vsla.l intrisic=vslal_vvsmvl +def vslal_vvImvl : RV<0xd4, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, VM:$vm, V64:$vd, I32:$vl), + "vsla.l $vx,$vz,$N,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvvl +def vsraw_vvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "vsra.w $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvvvl +def vsraw_vvvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "vsra.w $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvsl +def vsraw_vvsl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "vsra.w $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvsvl +def vsraw_vvsvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "vsra.w $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvsl +def vsraw_vvIl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, I32:$vl), + "vsra.w $vx,$vz,$N", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvsvl +def vsraw_vvIvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, V64:$vd, I32:$vl), + "vsra.w $vx,$vz,$N", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvvmvl +def vsraw_vvvmvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vsra.w 
$vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvsmvl +def vsraw_vvsmvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM:$vm, V64:$vd, I32:$vl), + "vsra.w $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=vsra.w intrisic=vsraw_vvsmvl +def vsraw_vvImvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, VM:$vm, V64:$vd, I32:$vl), + "vsra.w $vx,$vz,$N,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.lo intrisic=pvsralo_vvvl +def pvsralo_vvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsra.lo $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.lo intrisic=pvsralo_vvvvl +def pvsralo_vvvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsra.lo $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.lo intrisic=pvsralo_vvsl +def pvsralo_vvsl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsra.lo $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.lo intrisic=pvsralo_vvsvl +def pvsralo_vvsvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsra.lo $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.lo intrisic=pvsralo_vvvMvl +def pvsralo_vvvMvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsra.lo $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.lo intrisic=pvsralo_vvsMvl +def pvsralo_vvsMvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsra.lo $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.up intrisic=pvsraup_vvvl +def pvsraup_vvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsra.up $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.up intrisic=pvsraup_vvvvl +def pvsraup_vvvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsra.up $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.up intrisic=pvsraup_vvsl +def pvsraup_vvsl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsra.up $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.up intrisic=pvsraup_vvsvl +def pvsraup_vvsvl : RV<0xf6, (outs V64:$vx), (ins 
V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsra.up $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.up intrisic=pvsraup_vvvMvl +def pvsraup_vvvMvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsra.up $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra.up intrisic=pvsraup_vvsMvl +def pvsraup_vvsMvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsra.up $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra intrisic=pvsra_vvvl +def pvsra_vvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "pvsra $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra intrisic=pvsra_vvvvl +def pvsra_vvvvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "pvsra $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra intrisic=pvsra_vvsl +def pvsra_vvsl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "pvsra $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra intrisic=pvsra_vvsvl +def pvsra_vvsvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "pvsra $vx,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra intrisic=pvsra_vvvMvl +def pvsra_vvvMvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvsra $vx,$vz,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRA asm=pvsra intrisic=pvsra_vvsMvl +def pvsra_vvsMvl : RV<0xf6, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM512:$vm, V64:$vd, I32:$vl), + "pvsra $vx,$vz,$sy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRAX asm=vsra.l intrisic=vsral_vvvl +def vsral_vvvl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, V64:$vy, I32:$vl), + "vsra.l $vx,$vz,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRAX asm=vsra.l intrisic=vsral_vvvvl +def vsral_vvvvl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, V64:$vy, V64:$vd, I32:$vl), + "vsra.l $vx,$vz,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRAX asm=vsra.l intrisic=vsral_vvsl +def vsral_vvsl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, I64:$sy, I32:$vl), + "vsra.l $vx,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSRAX asm=vsra.l intrisic=vsral_vvsvl +def vsral_vvsvl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, I64:$sy, V64:$vd, 
I32:$vl),
+ "vsra.l $vx,$vz,$sy", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSRAX asm=vsra.l intrisic=vsral_vvsl
+def vsral_vvIl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, I32:$vl),
+ "vsra.l $vx,$vz,$N", [], NoItinerary>
+{
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSRAX asm=vsra.l intrisic=vsral_vvsvl
+def vsral_vvIvl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, V64:$vd, I32:$vl),
+ "vsra.l $vx,$vz,$N", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSRAX asm=vsra.l intrisic=vsral_vvvmvl
+def vsral_vvvmvl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, V64:$vy, VM:$vm, V64:$vd, I32:$vl),
+ "vsra.l $vx,$vz,$vy,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSRAX asm=vsra.l intrisic=vsral_vvsmvl
+def vsral_vvsmvl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, I64:$sy, VM:$vm, V64:$vd, I32:$vl),
+ "vsra.l $vx,$vz,$sy,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSRAX asm=vsra.l intrisic=vsral_vvsmvl
+def vsral_vvImvl : RV<0xd5, (outs V64:$vx), (ins V64:$vz, simm7Op64:$N, VM:$vm, V64:$vd, I32:$vl),
+ "vsra.l $vx,$vz,$N,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSFA asm=vsfa intrisic=vsfa_vvssl
+def vsfa_vvssl : RV<0xd7, (outs V64:$vx), (ins V64:$vz, I64:$sy, I64:$sz, I32:$vl),
+ "vsfa $vx,$vz,$sy,$sz", [], NoItinerary>
+{
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSFA asm=vsfa intrisic=vsfa_vvssvl
+def vsfa_vvssvl : RV<0xd7, (outs V64:$vx), (ins V64:$vz, I64:$sy, I64:$sz, V64:$vd, I32:$vl),
+ "vsfa $vx,$vz,$sy,$sz", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSFA asm=vsfa intrisic=vsfa_vvssl
+def vsfa_vvIsl : RV<0xd7, (outs V64:$vx), (ins V64:$vz, simm7Op64:$I, I64:$sz, I32:$vl),
+ "vsfa $vx,$vz,$I,$sz", [], NoItinerary>
+{
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSFA asm=vsfa intrisic=vsfa_vvssvl
+def vsfa_vvIsvl : RV<0xd7, (outs V64:$vx), (ins V64:$vz, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl),
+ "vsfa $vx,$vz,$I,$sz", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSFA asm=vsfa intrisic=vsfa_vvssmvl
+def vsfa_vvssmvl : RV<0xd7, (outs V64:$vx), (ins V64:$vz, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl),
+ "vsfa $vx,$vz,$sy,$sz,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VSFA asm=vsfa intrisic=vsfa_vvssmvl
+def vsfa_vvIsmvl : RV<0xd7, (outs V64:$vx), (ins V64:$vz, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl),
+ "vsfa $vx,$vz,$I,$sz,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
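+// Editor's note (added summary of the conventions visible in these
+// generated pseudos; not part of the generator's output): the suffix of
+// each def spells out its operand list -- "v" is a V64 vector register,
+// "s" a scalar register (I64 or F32 depending on the element type),
+// "I" a 7-bit signed immediate (simm7Op64), "m" a VM mask register,
+// "M" a VM512 mask pair used by the packed "pv" forms, and the final
+// "l" is the I32:$vl vector-length operand, which is kept out of the
+// instruction encoding via DisableEncoding. Variants that take an extra
+// V64:$vd input tie it to the result with Constraints "$vx = $vd",
+// presumably so the destination carries a pass-through value for masked
+// or shorter-length execution. All of these are isCodeGenOnly selection
+// pseudos in the "VEL" decoder namespace.
+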
+// inst=VFAD asm=vfadd.d intrisic=vfaddd_vvvl
+def vfaddd_vvvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl),
+ "vfadd.d $vx,$vy,$vz", [], NoItinerary>
+{
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.d intrisic=vfaddd_vvvvl
+def vfaddd_vvvvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl),
+ "vfadd.d $vx,$vy,$vz", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.d intrisic=vfaddd_vsvl
+def vfaddd_vsvl : RV<0xcc, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl),
+ "vfadd.d $vx,$sy,$vz", [], NoItinerary>
+{
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.d intrisic=vfaddd_vsvvl
+def vfaddd_vsvvl : RV<0xcc, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl),
+ "vfadd.d $vx,$sy,$vz", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.d intrisic=vfaddd_vvvmvl
+def vfaddd_vvvmvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl),
+ "vfadd.d $vx,$vy,$vz,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.d intrisic=vfaddd_vsvmvl
+def vfaddd_vsvmvl : RV<0xcc, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl),
+ "vfadd.d $vx,$sy,$vz,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.s intrisic=vfadds_vvvl
+def vfadds_vvvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl),
+ "vfadd.s $vx,$vy,$vz", [], NoItinerary>
+{
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.s intrisic=vfadds_vvvvl
+def vfadds_vvvvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl),
+ "vfadd.s $vx,$vy,$vz", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.s intrisic=vfadds_vsvl
+def vfadds_vsvl : RV<0xcc, (outs V64:$vx), (ins F32:$sy, V64:$vz, I32:$vl),
+ "vfadd.s $vx,$sy,$vz", [], NoItinerary>
+{
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.s intrisic=vfadds_vsvvl
+def vfadds_vsvvl : RV<0xcc, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vd, I32:$vl),
+ "vfadd.s $vx,$sy,$vz", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.s intrisic=vfadds_vvvmvl
+def vfadds_vvvmvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl),
+ "vfadd.s $vx,$vy,$vz,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=vfadd.s intrisic=vfadds_vsvmvl
+def vfadds_vsvmvl : RV<0xcc, (outs V64:$vx), (ins F32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl),
+ "vfadd.s $vx,$sy,$vz,$vm", [], NoItinerary>
+{
+ let Constraints = "$vx = $vd";
+ let DecoderNamespace = "VEL";
+ let isCodeGenOnly = 1;
+ let DisableEncoding = "$vl";
+}
+
+// inst=VFAD asm=pvfadd
intrisic=pvfadd_vvvl +def pvfadd_vvvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvfadd $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFAD asm=pvfadd intrisic=pvfadd_vvvvl +def pvfadd_vvvvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvfadd $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFAD asm=pvfadd intrisic=pvfadd_vsvl +def pvfadd_vsvl : RV<0xcc, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvfadd $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFAD asm=pvfadd intrisic=pvfadd_vsvvl +def pvfadd_vsvvl : RV<0xcc, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvfadd $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFAD asm=pvfadd intrisic=pvfadd_vvvMvl +def pvfadd_vvvMvl : RV<0xcc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfadd $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFAD asm=pvfadd intrisic=pvfadd_vsvMvl +def pvfadd_vsvMvl : RV<0xcc, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfadd $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.d intrisic=vfsubd_vvvl +def vfsubd_vvvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfsub.d $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.d intrisic=vfsubd_vvvvl +def vfsubd_vvvvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfsub.d $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.d intrisic=vfsubd_vsvl +def vfsubd_vsvl : RV<0xdc, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vfsub.d $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.d intrisic=vfsubd_vsvvl +def vfsubd_vsvvl : RV<0xdc, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfsub.d $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.d intrisic=vfsubd_vvvmvl +def vfsubd_vvvmvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfsub.d $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.d intrisic=vfsubd_vsvmvl +def vfsubd_vsvmvl : RV<0xdc, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfsub.d $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.s 
intrisic=vfsubs_vvvl +def vfsubs_vvvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfsub.s $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.s intrisic=vfsubs_vvvvl +def vfsubs_vvvvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfsub.s $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.s intrisic=vfsubs_vsvl +def vfsubs_vsvl : RV<0xdc, (outs V64:$vx), (ins F32:$sy, V64:$vz, I32:$vl), + "vfsub.s $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.s intrisic=vfsubs_vsvvl +def vfsubs_vsvvl : RV<0xdc, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfsub.s $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.s intrisic=vfsubs_vvvmvl +def vfsubs_vvvmvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfsub.s $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=vfsub.s intrisic=vfsubs_vsvmvl +def vfsubs_vsvmvl : RV<0xdc, (outs V64:$vx), (ins F32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfsub.s $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=pvfsub intrisic=pvfsub_vvvl +def pvfsub_vvvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvfsub $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=pvfsub intrisic=pvfsub_vvvvl +def pvfsub_vvvvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvfsub $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=pvfsub intrisic=pvfsub_vsvl +def pvfsub_vsvl : RV<0xdc, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvfsub $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=pvfsub intrisic=pvfsub_vsvvl +def pvfsub_vsvvl : RV<0xdc, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvfsub $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=pvfsub intrisic=pvfsub_vvvMvl +def pvfsub_vvvMvl : RV<0xdc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfsub $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSB asm=pvfsub intrisic=pvfsub_vsvMvl +def pvfsub_vsvMvl : RV<0xdc, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfsub $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.d 
intrisic=vfmuld_vvvl +def vfmuld_vvvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfmul.d $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.d intrisic=vfmuld_vvvvl +def vfmuld_vvvvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfmul.d $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.d intrisic=vfmuld_vsvl +def vfmuld_vsvl : RV<0xcd, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vfmul.d $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.d intrisic=vfmuld_vsvvl +def vfmuld_vsvvl : RV<0xcd, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfmul.d $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.d intrisic=vfmuld_vvvmvl +def vfmuld_vvvmvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmul.d $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.d intrisic=vfmuld_vsvmvl +def vfmuld_vsvmvl : RV<0xcd, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmul.d $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.s intrisic=vfmuls_vvvl +def vfmuls_vvvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfmul.s $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.s intrisic=vfmuls_vvvvl +def vfmuls_vvvvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfmul.s $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.s intrisic=vfmuls_vsvl +def vfmuls_vsvl : RV<0xcd, (outs V64:$vx), (ins F32:$sy, V64:$vz, I32:$vl), + "vfmul.s $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.s intrisic=vfmuls_vsvvl +def vfmuls_vsvvl : RV<0xcd, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfmul.s $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.s intrisic=vfmuls_vvvmvl +def vfmuls_vvvmvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmul.s $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=vfmul.s intrisic=vfmuls_vsvmvl +def vfmuls_vsvmvl : RV<0xcd, (outs V64:$vx), (ins F32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmul.s $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=pvfmul 
intrisic=pvfmul_vvvl +def pvfmul_vvvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvfmul $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=pvfmul intrisic=pvfmul_vvvvl +def pvfmul_vvvvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvfmul $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=pvfmul intrisic=pvfmul_vsvl +def pvfmul_vsvl : RV<0xcd, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvfmul $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=pvfmul intrisic=pvfmul_vsvvl +def pvfmul_vsvvl : RV<0xcd, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvfmul $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=pvfmul intrisic=pvfmul_vvvMvl +def pvfmul_vvvMvl : RV<0xcd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfmul $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMP asm=pvfmul intrisic=pvfmul_vsvMvl +def pvfmul_vsvMvl : RV<0xcd, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfmul $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.d intrisic=vfdivd_vvvl +def vfdivd_vvvl : RV<0xdd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfdiv.d $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.d intrisic=vfdivd_vvvvl +def vfdivd_vvvvl : RV<0xdd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfdiv.d $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.d intrisic=vfdivd_vsvl +def vfdivd_vsvl : RV<0xdd, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vfdiv.d $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.d intrisic=vfdivd_vsvvl +def vfdivd_vsvvl : RV<0xdd, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfdiv.d $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.d intrisic=vfdivd_vvvmvl +def vfdivd_vvvmvl : RV<0xdd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfdiv.d $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.d intrisic=vfdivd_vsvmvl +def vfdivd_vsvmvl : RV<0xdd, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfdiv.d $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.s 
intrisic=vfdivs_vvvl +def vfdivs_vvvl : RV<0xdd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfdiv.s $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.s intrisic=vfdivs_vvvvl +def vfdivs_vvvvl : RV<0xdd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfdiv.s $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.s intrisic=vfdivs_vsvl +def vfdivs_vsvl : RV<0xdd, (outs V64:$vx), (ins F32:$sy, V64:$vz, I32:$vl), + "vfdiv.s $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.s intrisic=vfdivs_vsvvl +def vfdivs_vsvvl : RV<0xdd, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfdiv.s $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.s intrisic=vfdivs_vvvmvl +def vfdivs_vvvmvl : RV<0xdd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfdiv.s $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFDV asm=vfdiv.s intrisic=vfdivs_vsvmvl +def vfdivs_vsvmvl : RV<0xdd, (outs V64:$vx), (ins F32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfdiv.s $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSQRT asm=vfsqrt.d intrisic=vfsqrtd_vvl +def vfsqrtd_vvl : RV<0xed, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfsqrt.d $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSQRT asm=vfsqrt.d intrisic=vfsqrtd_vvvl +def vfsqrtd_vvvl : RV<0xed, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfsqrt.d $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSQRT asm=vfsqrt.s intrisic=vfsqrts_vvl +def vfsqrts_vvl : RV<0xed, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfsqrt.s $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSQRT asm=vfsqrt.s intrisic=vfsqrts_vvvl +def vfsqrts_vvvl : RV<0xed, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfsqrt.s $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.d intrisic=vfcmpd_vvvl +def vfcmpd_vvvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfcmp.d $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.d intrisic=vfcmpd_vvvvl +def vfcmpd_vvvvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfcmp.d $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.d intrisic=vfcmpd_vsvl +def vfcmpd_vsvl : RV<0xfc, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vfcmp.d 
$vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.d intrisic=vfcmpd_vsvvl +def vfcmpd_vsvvl : RV<0xfc, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfcmp.d $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.d intrisic=vfcmpd_vvvmvl +def vfcmpd_vvvmvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfcmp.d $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.d intrisic=vfcmpd_vsvmvl +def vfcmpd_vsvmvl : RV<0xfc, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfcmp.d $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.s intrisic=vfcmps_vvvl +def vfcmps_vvvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfcmp.s $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.s intrisic=vfcmps_vvvvl +def vfcmps_vvvvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfcmp.s $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.s intrisic=vfcmps_vsvl +def vfcmps_vsvl : RV<0xfc, (outs V64:$vx), (ins F32:$sy, V64:$vz, I32:$vl), + "vfcmp.s $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.s intrisic=vfcmps_vsvvl +def vfcmps_vsvvl : RV<0xfc, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfcmp.s $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.s intrisic=vfcmps_vvvmvl +def vfcmps_vvvmvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfcmp.s $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=vfcmp.s intrisic=vfcmps_vsvmvl +def vfcmps_vsvmvl : RV<0xfc, (outs V64:$vx), (ins F32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfcmp.s $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=pvfcmp intrisic=pvfcmp_vvvl +def pvfcmp_vvvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvfcmp $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=pvfcmp intrisic=pvfcmp_vvvvl +def pvfcmp_vvvvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvfcmp $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=pvfcmp intrisic=pvfcmp_vsvl +def pvfcmp_vsvl : RV<0xfc, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvfcmp 
$vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=pvfcmp intrisic=pvfcmp_vsvvl +def pvfcmp_vsvvl : RV<0xfc, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvfcmp $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=pvfcmp intrisic=pvfcmp_vvvMvl +def pvfcmp_vvvMvl : RV<0xfc, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfcmp $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCP asm=pvfcmp intrisic=pvfcmp_vsvMvl +def pvfcmp_vsvMvl : RV<0xfc, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfcmp $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.d intrisic=vfmaxd_vvvl +def vfmaxd_vvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfmax.d $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.d intrisic=vfmaxd_vvvvl +def vfmaxd_vvvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfmax.d $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.d intrisic=vfmaxd_vsvl +def vfmaxd_vsvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vfmax.d $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.d intrisic=vfmaxd_vsvvl +def vfmaxd_vsvvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfmax.d $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.d intrisic=vfmaxd_vvvmvl +def vfmaxd_vvvmvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmax.d $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.d intrisic=vfmaxd_vsvmvl +def vfmaxd_vsvmvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmax.d $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.s intrisic=vfmaxs_vvvl +def vfmaxs_vvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfmax.s $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.s intrisic=vfmaxs_vvvvl +def vfmaxs_vvvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfmax.s $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.s intrisic=vfmaxs_vsvl +def vfmaxs_vsvl : RV<0xbd, (outs V64:$vx), (ins F32:$sy, V64:$vz, I32:$vl), + "vfmax.s 
$vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.s intrisic=vfmaxs_vsvvl +def vfmaxs_vsvvl : RV<0xbd, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfmax.s $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.s intrisic=vfmaxs_vvvmvl +def vfmaxs_vvvmvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmax.s $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmax.s intrisic=vfmaxs_vsvmvl +def vfmaxs_vsvmvl : RV<0xbd, (outs V64:$vx), (ins F32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmax.s $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmax intrisic=pvfmax_vvvl +def pvfmax_vvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvfmax $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmax intrisic=pvfmax_vvvvl +def pvfmax_vvvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvfmax $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmax intrisic=pvfmax_vsvl +def pvfmax_vsvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvfmax $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmax intrisic=pvfmax_vsvvl +def pvfmax_vsvvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvfmax $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmax intrisic=pvfmax_vvvMvl +def pvfmax_vvvMvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfmax $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmax intrisic=pvfmax_vsvMvl +def pvfmax_vsvMvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfmax $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.d intrisic=vfmind_vvvl +def vfmind_vvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfmin.d $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.d intrisic=vfmind_vvvvl +def vfmind_vvvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfmin.d $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.d intrisic=vfmind_vsvl +def vfmind_vsvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "vfmin.d 
$vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.d intrisic=vfmind_vsvvl +def vfmind_vsvvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfmin.d $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.d intrisic=vfmind_vvvmvl +def vfmind_vvvmvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmin.d $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.d intrisic=vfmind_vsvmvl +def vfmind_vsvmvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmin.d $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.s intrisic=vfmins_vvvl +def vfmins_vvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "vfmin.s $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.s intrisic=vfmins_vvvvl +def vfmins_vvvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "vfmin.s $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.s intrisic=vfmins_vsvl +def vfmins_vsvl : RV<0xbd, (outs V64:$vx), (ins F32:$sy, V64:$vz, I32:$vl), + "vfmin.s $vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.s intrisic=vfmins_vsvvl +def vfmins_vsvvl : RV<0xbd, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vd, I32:$vl), + "vfmin.s $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.s intrisic=vfmins_vvvmvl +def vfmins_vvvmvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmin.s $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=vfmin.s intrisic=vfmins_vsvmvl +def vfmins_vsvmvl : RV<0xbd, (outs V64:$vx), (ins F32:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vfmin.s $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmin intrisic=pvfmin_vvvl +def pvfmin_vvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, I32:$vl), + "pvfmin $vx,$vy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmin intrisic=pvfmin_vvvvl +def pvfmin_vvvvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vd, I32:$vl), + "pvfmin $vx,$vy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmin intrisic=pvfmin_vsvl +def pvfmin_vsvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, I32:$vl), + "pvfmin 
$vx,$sy,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmin intrisic=pvfmin_vsvvl +def pvfmin_vsvvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vd, I32:$vl), + "pvfmin $vx,$sy,$vz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmin intrisic=pvfmin_vvvMvl +def pvfmin_vvvMvl : RV<0xbd, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfmin $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFCM asm=pvfmin intrisic=pvfmin_vsvMvl +def pvfmin_vsvMvl : RV<0xbd, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "pvfmin $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vvvvl +def vfmadd_vvvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfmad.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vvvvvl +def vfmadd_vvvvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmad.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vsvvl +def vfmadd_vsvvl : RV<0xe2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfmad.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vsvvvl +def vfmadd_vsvvvl : RV<0xe2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmad.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vvsvl +def vfmadd_vvsvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "vfmad.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vvsvvl +def vfmadd_vvsvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfmad.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vvvvmvl +def vfmadd_vvvvmvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmad.d $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.d intrisic=vfmadd_vsvvmvl +def vfmadd_vsvvmvl : RV<0xe2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmad.d $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// 
inst=VFMAD asm=vfmad.d intrisic=vfmadd_vvsvmvl +def vfmadd_vvsvmvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmad.d $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vvvvl +def vfmads_vvvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfmad.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vvvvvl +def vfmads_vvvvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmad.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vsvvl +def vfmads_vsvvl : RV<0xe2, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfmad.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vsvvvl +def vfmads_vsvvvl : RV<0xe2, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmad.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vvsvl +def vfmads_vvsvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, I32:$vl), + "vfmad.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vvsvvl +def vfmads_vvsvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfmad.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vvvvmvl +def vfmads_vvvvmvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmad.s $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vsvvmvl +def vfmads_vsvvmvl : RV<0xe2, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmad.s $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=vfmad.s intrisic=vfmads_vvsvmvl +def vfmads_vvsvmvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmad.s $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vvvvl +def pvfmad_vvvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "pvfmad $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vvvvvl +def pvfmad_vvvvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, 
I32:$vl), + "pvfmad $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vsvvl +def pvfmad_vsvvl : RV<0xe2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "pvfmad $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vsvvvl +def pvfmad_vsvvvl : RV<0xe2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "pvfmad $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vvsvl +def pvfmad_vvsvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "pvfmad $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vvsvvl +def pvfmad_vvsvvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "pvfmad $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vvvvMvl +def pvfmad_vvvvMvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfmad $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vsvvMvl +def pvfmad_vsvvMvl : RV<0xe2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfmad $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAD asm=pvfmad intrisic=pvfmad_vvsvMvl +def pvfmad_vvsvMvl : RV<0xe2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfmad $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vvvvl +def vfmsbd_vvvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfmsb.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vvvvvl +def vfmsbd_vvvvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmsb.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vsvvl +def vfmsbd_vsvvl : RV<0xf2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfmsb.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vsvvvl +def vfmsbd_vsvvvl : RV<0xf2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmsb.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + 
let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vvsvl +def vfmsbd_vvsvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "vfmsb.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vvsvvl +def vfmsbd_vvsvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfmsb.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vvvvmvl +def vfmsbd_vvvvmvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmsb.d $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vsvvmvl +def vfmsbd_vsvvmvl : RV<0xf2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmsb.d $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.d intrisic=vfmsbd_vvsvmvl +def vfmsbd_vvsvmvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmsb.d $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vvvvl +def vfmsbs_vvvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfmsb.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vvvvvl +def vfmsbs_vvvvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmsb.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vsvvl +def vfmsbs_vsvvl : RV<0xf2, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfmsb.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vsvvvl +def vfmsbs_vsvvvl : RV<0xf2, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfmsb.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vvsvl +def vfmsbs_vvsvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, I32:$vl), + "vfmsb.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vvsvvl +def vfmsbs_vvsvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfmsb.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vvvvmvl +def vfmsbs_vvvvmvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, 
V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmsb.s $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vsvvmvl +def vfmsbs_vsvvmvl : RV<0xf2, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmsb.s $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=vfmsb.s intrisic=vfmsbs_vvsvmvl +def vfmsbs_vvsvmvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfmsb.s $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vvvvl +def pvfmsb_vvvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "pvfmsb $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vvvvvl +def pvfmsb_vvvvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "pvfmsb $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vsvvl +def pvfmsb_vsvvl : RV<0xf2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "pvfmsb $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vsvvvl +def pvfmsb_vsvvvl : RV<0xf2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "pvfmsb $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vvsvl +def pvfmsb_vvsvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "pvfmsb $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vvsvvl +def pvfmsb_vvsvvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "pvfmsb $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vvvvMvl +def pvfmsb_vvvvMvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfmsb $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vsvvMvl +def pvfmsb_vsvvMvl : RV<0xf2, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfmsb $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMSB asm=pvfmsb intrisic=pvfmsb_vvsvMvl +def pvfmsb_vvsvMvl : RV<0xf2, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfmsb $vx,$vy,$sy,$vw,$vm", [], 
NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vvvvl +def vfnmadd_vvvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfnmad.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vvvvvl +def vfnmadd_vvvvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmad.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vsvvl +def vfnmadd_vsvvl : RV<0xe3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfnmad.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vsvvvl +def vfnmadd_vsvvvl : RV<0xe3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmad.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vvsvl +def vfnmadd_vvsvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "vfnmad.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vvsvvl +def vfnmadd_vvsvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfnmad.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vvvvmvl +def vfnmadd_vvvvmvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmad.d $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vsvvmvl +def vfnmadd_vsvvmvl : RV<0xe3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmad.d $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.d intrisic=vfnmadd_vvsvmvl +def vfnmadd_vvsvmvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmad.d $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vvvvl +def vfnmads_vvvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfnmad.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vvvvvl +def vfnmads_vvvvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmad.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vsvvl +def vfnmads_vsvvl : RV<0xe3, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfnmad.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vsvvvl +def vfnmads_vsvvvl : RV<0xe3, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmad.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vvsvl +def vfnmads_vvsvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, I32:$vl), + "vfnmad.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vvsvvl +def vfnmads_vvsvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfnmad.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vvvvmvl +def vfnmads_vvvvmvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmad.s $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vsvvmvl +def vfnmads_vsvvmvl : RV<0xe3, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmad.s $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=vfnmad.s intrisic=vfnmads_vvsvmvl +def vfnmads_vvsvmvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmad.s $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vvvvl +def pvfnmad_vvvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "pvfnmad $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vvvvvl +def pvfnmad_vvvvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "pvfnmad $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vsvvl +def pvfnmad_vsvvl : RV<0xe3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "pvfnmad $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vsvvvl +def pvfnmad_vsvvvl : RV<0xe3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "pvfnmad $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad 
intrisic=pvfnmad_vvsvl +def pvfnmad_vvsvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "pvfnmad $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vvsvvl +def pvfnmad_vvsvvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "pvfnmad $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vvvvMvl +def pvfnmad_vvvvMvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfnmad $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vsvvMvl +def pvfnmad_vsvvMvl : RV<0xe3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfnmad $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMAD asm=pvfnmad intrisic=pvfnmad_vvsvMvl +def pvfnmad_vvsvMvl : RV<0xe3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfnmad $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vvvvl +def vfnmsbd_vvvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfnmsb.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vvvvvl +def vfnmsbd_vvvvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmsb.d $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vsvvl +def vfnmsbd_vsvvl : RV<0xf3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfnmsb.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vsvvvl +def vfnmsbd_vsvvvl : RV<0xf3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmsb.d $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vvsvl +def vfnmsbd_vvsvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "vfnmsb.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vvsvvl +def vfnmsbd_vvsvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfnmsb.d $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vvvvmvl +def vfnmsbd_vvvvmvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, 
V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmsb.d $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vsvvmvl +def vfnmsbd_vsvvmvl : RV<0xf3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmsb.d $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.d intrisic=vfnmsbd_vvsvmvl +def vfnmsbd_vvsvmvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmsb.d $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vvvvl +def vfnmsbs_vvvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "vfnmsb.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vvvvvl +def vfnmsbs_vvvvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmsb.s $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vsvvl +def vfnmsbs_vsvvl : RV<0xf3, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, I32:$vl), + "vfnmsb.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vsvvvl +def vfnmsbs_vsvvvl : RV<0xf3, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "vfnmsb.s $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vvsvl +def vfnmsbs_vvsvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, I32:$vl), + "vfnmsb.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vvsvvl +def vfnmsbs_vvsvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, V64:$vd, I32:$vl), + "vfnmsb.s $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vvvvmvl +def vfnmsbs_vvvvmvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmsb.s $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vsvvmvl +def vfnmsbs_vsvvmvl : RV<0xf3, (outs V64:$vx), (ins F32:$sy, V64:$vz, V64:$vw, VM:$vm, V64:$vd, I32:$vl), + "vfnmsb.s $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=vfnmsb.s intrisic=vfnmsbs_vvsvmvl +def vfnmsbs_vvsvmvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, F32:$sy, V64:$vw, 
VM:$vm, V64:$vd, I32:$vl), + "vfnmsb.s $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vvvvl +def pvfnmsb_vvvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, I32:$vl), + "pvfnmsb $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vvvvvl +def pvfnmsb_vvvvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "pvfnmsb $vx,$vy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vsvvl +def pvfnmsb_vsvvl : RV<0xf3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, I32:$vl), + "pvfnmsb $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vsvvvl +def pvfnmsb_vsvvvl : RV<0xf3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, V64:$vd, I32:$vl), + "pvfnmsb $vx,$sy,$vz,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vvsvl +def pvfnmsb_vvsvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, I32:$vl), + "pvfnmsb $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vvsvvl +def pvfnmsb_vvsvvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, V64:$vd, I32:$vl), + "pvfnmsb $vx,$vy,$sy,$vw", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vvvvMvl +def pvfnmsb_vvvvMvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfnmsb $vx,$vy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vsvvMvl +def pvfnmsb_vsvvMvl : RV<0xf3, (outs V64:$vx), (ins I64:$sy, V64:$vz, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfnmsb $vx,$sy,$vz,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFNMSB asm=pvfnmsb intrisic=pvfnmsb_vvsvMvl +def pvfnmsb_vvsvMvl : RV<0xf3, (outs V64:$vx), (ins V64:$vy, I64:$sy, V64:$vw, VM512:$vm, V64:$vd, I32:$vl), + "pvfnmsb $vx,$vy,$sy,$vw,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRCP asm=vrcp.d intrisic=vrcpd_vvl +def vrcpd_vvl : RV<0xe1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrcp.d $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRCP asm=vrcp.d intrisic=vrcpd_vvvl +def vrcpd_vvvl : RV<0xe1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrcp.d $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let 
DisableEncoding = "$vl"; +} + +// inst=VRCP asm=vrcp.s intrisic=vrcps_vvl +def vrcps_vvl : RV<0xe1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrcp.s $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRCP asm=vrcp.s intrisic=vrcps_vvvl +def vrcps_vvvl : RV<0xe1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrcp.s $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRCP asm=pvrcp intrisic=pvrcp_vvl +def pvrcp_vvl : RV<0xe1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "pvrcp $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRCP asm=pvrcp intrisic=pvrcp_vvvl +def pvrcp_vvvl : RV<0xe1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "pvrcp $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.d intrisic=vrsqrtd_vvl +def vrsqrtd_vvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrsqrt.d $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.d intrisic=vrsqrtd_vvvl +def vrsqrtd_vvvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrsqrt.d $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.s intrisic=vrsqrts_vvl +def vrsqrts_vvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrsqrt.s $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.s intrisic=vrsqrts_vvvl +def vrsqrts_vvvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrsqrt.s $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=pvrsqrt intrisic=pvrsqrt_vvl +def pvrsqrt_vvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "pvrsqrt $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=pvrsqrt intrisic=pvrsqrt_vvvl +def pvrsqrt_vvvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "pvrsqrt $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.d.nex intrisic=vrsqrtdnex_vvl +def vrsqrtdnex_vvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrsqrt.d.nex $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.d.nex intrisic=vrsqrtdnex_vvvl +def vrsqrtdnex_vvvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrsqrt.d.nex $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.s.nex intrisic=vrsqrtsnex_vvl +def vrsqrtsnex_vvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrsqrt.s.nex $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let 
DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=vrsqrt.s.nex intrisic=vrsqrtsnex_vvvl +def vrsqrtsnex_vvvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrsqrt.s.nex $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=pvrsqrt.nex intrisic=pvrsqrtnex_vvl +def pvrsqrtnex_vvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "pvrsqrt.nex $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRSQRT asm=pvrsqrt.nex intrisic=pvrsqrtnex_vvvl +def pvrsqrtnex_vvvl : RV<0xf1, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "pvrsqrt.nex $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.sx intrisic=vcvtwdsx_vvl +def vcvtwdsx_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.d.sx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.sx intrisic=vcvtwdsx_vvvl +def vcvtwdsx_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.d.sx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.sx intrisic=vcvtwdsx_vvmvl +def vcvtwdsx_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.d.sx $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.sx.rz intrisic=vcvtwdsxrz_vvl +def vcvtwdsxrz_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.d.sx.rz $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.sx.rz intrisic=vcvtwdsxrz_vvvl +def vcvtwdsxrz_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.d.sx.rz $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.sx.rz intrisic=vcvtwdsxrz_vvmvl +def vcvtwdsxrz_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.d.sx.rz $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.zx intrisic=vcvtwdzx_vvl +def vcvtwdzx_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.d.zx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.zx intrisic=vcvtwdzx_vvvl +def vcvtwdzx_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.d.zx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.zx intrisic=vcvtwdzx_vvmvl +def vcvtwdzx_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.d.zx $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let 
DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.zx.rz intrisic=vcvtwdzxrz_vvl +def vcvtwdzxrz_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.d.zx.rz $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.zx.rz intrisic=vcvtwdzxrz_vvvl +def vcvtwdzxrz_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.d.zx.rz $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.d.zx.rz intrisic=vcvtwdzxrz_vvmvl +def vcvtwdzxrz_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.d.zx.rz $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.sx intrisic=vcvtwssx_vvl +def vcvtwssx_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.s.sx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.sx intrisic=vcvtwssx_vvvl +def vcvtwssx_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.s.sx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.sx intrisic=vcvtwssx_vvmvl +def vcvtwssx_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.s.sx $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.sx.rz intrisic=vcvtwssxrz_vvl +def vcvtwssxrz_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.s.sx.rz $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.sx.rz intrisic=vcvtwssxrz_vvvl +def vcvtwssxrz_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.s.sx.rz $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.sx.rz intrisic=vcvtwssxrz_vvmvl +def vcvtwssxrz_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.s.sx.rz $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.zx intrisic=vcvtwszx_vvl +def vcvtwszx_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.s.zx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.zx intrisic=vcvtwszx_vvvl +def vcvtwszx_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.s.zx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.zx intrisic=vcvtwszx_vvmvl +def vcvtwszx_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.s.zx $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.zx.rz intrisic=vcvtwszxrz_vvl +def vcvtwszxrz_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.w.s.zx.rz $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.zx.rz intrisic=vcvtwszxrz_vvvl +def vcvtwszxrz_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.w.s.zx.rz $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=vcvt.w.s.zx.rz intrisic=vcvtwszxrz_vvmvl +def vcvtwszxrz_vvmvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.w.s.zx.rz $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=pvcvt.w.s intrisic=pvcvtws_vvl +def pvcvtws_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "pvcvt.w.s $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=pvcvt.w.s intrisic=pvcvtws_vvvl +def pvcvtws_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "pvcvt.w.s $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=pvcvt.w.s intrisic=pvcvtws_vvMvl +def pvcvtws_vvMvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvcvt.w.s $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=pvcvt.w.s.rz intrisic=pvcvtwsrz_vvl +def pvcvtwsrz_vvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "pvcvt.w.s.rz $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=pvcvt.w.s.rz intrisic=pvcvtwsrz_vvvl +def pvcvtwsrz_vvvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "pvcvt.w.s.rz $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIX asm=pvcvt.w.s.rz intrisic=pvcvtwsrz_vvMvl +def pvcvtwsrz_vvMvl : RV<0xe8, (outs V64:$vx), (ins V64:$vy, VM512:$vm, V64:$vd, I32:$vl), + "pvcvt.w.s.rz $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIXX asm=vcvt.l.d intrisic=vcvtld_vvl +def vcvtld_vvl : RV<0xa8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.l.d $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIXX asm=vcvt.l.d intrisic=vcvtld_vvvl +def vcvtld_vvvl : RV<0xa8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.l.d $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIXX asm=vcvt.l.d intrisic=vcvtld_vvmvl +def vcvtld_vvmvl : RV<0xa8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.l.d $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let 
DisableEncoding = "$vl"; +} + +// inst=VFIXX asm=vcvt.l.d.rz intrisic=vcvtldrz_vvl +def vcvtldrz_vvl : RV<0xa8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.l.d.rz $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIXX asm=vcvt.l.d.rz intrisic=vcvtldrz_vvvl +def vcvtldrz_vvvl : RV<0xa8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.l.d.rz $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFIXX asm=vcvt.l.d.rz intrisic=vcvtldrz_vvmvl +def vcvtldrz_vvmvl : RV<0xa8, (outs V64:$vx), (ins V64:$vy, VM:$vm, V64:$vd, I32:$vl), + "vcvt.l.d.rz $vx,$vy,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLT asm=vcvt.d.w intrisic=vcvtdw_vvl +def vcvtdw_vvl : RV<0xf8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.d.w $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLT asm=vcvt.d.w intrisic=vcvtdw_vvvl +def vcvtdw_vvvl : RV<0xf8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.d.w $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLT asm=vcvt.s.w intrisic=vcvtsw_vvl +def vcvtsw_vvl : RV<0xf8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.s.w $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLT asm=vcvt.s.w intrisic=vcvtsw_vvvl +def vcvtsw_vvvl : RV<0xf8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.s.w $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLT asm=pvcvt.s.w intrisic=pvcvtsw_vvl +def pvcvtsw_vvl : RV<0xf8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "pvcvt.s.w $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLT asm=pvcvt.s.w intrisic=pvcvtsw_vvvl +def pvcvtsw_vvvl : RV<0xf8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "pvcvt.s.w $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLTX asm=vcvt.d.l intrisic=vcvtdl_vvl +def vcvtdl_vvl : RV<0xb8, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.d.l $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFLTX asm=vcvt.d.l intrisic=vcvtdl_vvvl +def vcvtdl_vvvl : RV<0xb8, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.d.l $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCVD asm=vcvt.d.s intrisic=vcvtds_vvl +def vcvtds_vvl : RV<0x8f, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.d.s $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCVD asm=vcvt.d.s intrisic=vcvtds_vvvl +def vcvtds_vvvl : RV<0x8f, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.d.s $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let 
DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCVS asm=vcvt.s.d intrisic=vcvtsd_vvl +def vcvtsd_vvl : RV<0x9f, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vcvt.s.d $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCVS asm=vcvt.s.d intrisic=vcvtsd_vvvl +def vcvtsd_vvvl : RV<0x9f, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vcvt.s.d $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg intrisic=vmrg_vvvml +def vmrg_vvvml : RV<0xd6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, I32:$vl), + "vmrg $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg intrisic=vmrg_vvvmvl +def vmrg_vvvmvl : RV<0xd6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmrg $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg intrisic=vmrg_vsvml +def vmrg_vsvml : RV<0xd6, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, I32:$vl), + "vmrg $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg intrisic=vmrg_vsvmvl +def vmrg_vsvmvl : RV<0xd6, (outs V64:$vx), (ins I64:$sy, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmrg $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg intrisic=vmrg_vsvml +def vmrg_vIvml : RV<0xd6, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, I32:$vl), + "vmrg $vx,$I,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg intrisic=vmrg_vsvmvl +def vmrg_vIvmvl : RV<0xd6, (outs V64:$vx), (ins simm7Op64:$I, V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vmrg $vx,$I,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg.w intrisic=vmrgw_vvvMl +def vmrgw_vvvMl : RV<0xd6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, I32:$vl), + "vmrg.w $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg.w intrisic=vmrgw_vvvMvl +def vmrgw_vvvMvl : RV<0xd6, (outs V64:$vx), (ins V64:$vy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "vmrg.w $vx,$vy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg.w intrisic=vmrgw_vsvMl +def vmrgw_vsvMl : RV<0xd6, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM512:$vm, I32:$vl), + "vmrg.w $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMRG asm=vmrg.w intrisic=vmrgw_vsvMvl +def vmrgw_vsvMvl : RV<0xd6, (outs V64:$vx), (ins I32:$sy, V64:$vz, VM512:$vm, V64:$vd, I32:$vl), + "vmrg.w $vx,$sy,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + 
+// inst=VSHF asm=vshf intrisic=vshf_vvvsl +def vshf_vvvsl : RV<0xbc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I64:$sy, I32:$vl), + "vshf $vx,$vy,$vz,$sy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSHF asm=vshf intrisic=vshf_vvvsvl +def vshf_vvvsvl : RV<0xbc, (outs V64:$vx), (ins V64:$vy, V64:$vz, I64:$sy, V64:$vd, I32:$vl), + "vshf $vx,$vy,$vz,$sy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSHF asm=vshf intrisic=vshf_vvvsl +def vshf_vvvIl : RV<0xbc, (outs V64:$vx), (ins V64:$vy, V64:$vz, simm7Op64:$N, I32:$vl), + "vshf $vx,$vy,$vz,$N", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSHF asm=vshf intrisic=vshf_vvvsvl +def vshf_vvvIvl : RV<0xbc, (outs V64:$vx), (ins V64:$vy, V64:$vz, simm7Op64:$N, V64:$vd, I32:$vl), + "vshf $vx,$vy,$vz,$N", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VCP asm=vcp intrisic=vcp_vvmvl +def vcp_vvmvl : RV<0x8d, (outs V64:$vx), (ins V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vcp $vx,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VEX asm=vex intrisic=vex_vvmvl +def vex_vvmvl : RV<0x9d, (outs V64:$vx), (ins V64:$vz, VM:$vm, V64:$vd, I32:$vl), + "vex $vx,$vz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.at intrisic=vfmklat_ml +def vfmklat_ml : RV<0xb4, (outs VM:$vmx), (ins I32:$vl), + "vfmk.l.at $vmx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.af intrisic=vfmklaf_ml +def vfmklaf_ml : RV<0xb4, (outs VM:$vmx), (ins I32:$vl), + "vfmk.l.af $vmx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=pvfmk.w.lo.at intrisic=pvfmkwloat_ml +def pvfmkwloat_ml : RV<0xb5, (outs VM:$vmx), (ins I32:$vl), + "pvfmk.w.lo.at $vmx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=pvfmk.w.up.at intrisic=pvfmkwupat_ml +def pvfmkwupat_ml : RV<0xb5, (outs VM:$vmx), (ins I32:$vl), + "pvfmk.w.up.at $vmx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=pvfmk.w.lo.af intrisic=pvfmkwloaf_ml +def pvfmkwloaf_ml : RV<0xb5, (outs VM:$vmx), (ins I32:$vl), + "pvfmk.w.lo.af $vmx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=pvfmk.w.up.af intrisic=pvfmkwupaf_ml +def pvfmkwupaf_ml : RV<0xb5, (outs VM:$vmx), (ins I32:$vl), + "pvfmk.w.up.af $vmx", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=pvfmk.at intrisic=pvfmkat_Ml +def pvfmkat_Ml : Pseudo<(outs VM512:$vmx), (ins I32:$vl), + "# pvfmk.at $vmx", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=pvfmk.af intrisic=pvfmkaf_Ml +def pvfmkaf_Ml : Pseudo<(outs VM512:$vmx), 
(ins I32:$vl), + "# pvfmk.af $vmx", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.gt intrisic=vfmklgt_mvl +def vfmklgt_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.gt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.gt intrisic=vfmklgt_mvml +def vfmklgt_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.lt intrisic=vfmkllt_mvl +def vfmkllt_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.lt intrisic=vfmkllt_mvml +def vfmkllt_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.lt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.ne intrisic=vfmklne_mvl +def vfmklne_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.ne intrisic=vfmklne_mvml +def vfmklne_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.eq intrisic=vfmkleq_mvl +def vfmkleq_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.eq $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.eq intrisic=vfmkleq_mvml +def vfmkleq_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.ge intrisic=vfmklge_mvl +def vfmklge_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.ge intrisic=vfmklge_mvml +def vfmklge_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.le intrisic=vfmklle_mvl +def vfmklle_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.le intrisic=vfmklle_mvml +def vfmklle_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.le $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.num intrisic=vfmklnum_mvl +def vfmklnum_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.num 
intrisic=vfmklnum_mvml +def vfmklnum_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.nan intrisic=vfmklnan_mvl +def vfmklnan_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.nan intrisic=vfmklnan_mvml +def vfmklnan_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.nan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.gtnan intrisic=vfmklgtnan_mvl +def vfmklgtnan_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.gtnan intrisic=vfmklgtnan_mvml +def vfmklgtnan_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.ltnan intrisic=vfmklltnan_mvl +def vfmklltnan_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.ltnan intrisic=vfmklltnan_mvml +def vfmklltnan_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.nenan intrisic=vfmklnenan_mvl +def vfmklnenan_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.nenan intrisic=vfmklnenan_mvml +def vfmklnenan_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.eqnan intrisic=vfmkleqnan_mvl +def vfmkleqnan_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.eqnan intrisic=vfmkleqnan_mvml +def vfmkleqnan_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.eqnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.genan intrisic=vfmklgenan_mvl +def vfmklgenan_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.genan intrisic=vfmklgenan_mvml +def vfmklgenan_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.lenan 
intrisic=vfmkllenan_mvl +def vfmkllenan_mvl : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.l.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMK asm=vfmk.l.lenan intrisic=vfmkllenan_mvml +def vfmkllenan_mvml : RV<0xb4, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.l.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.gt intrisic=vfmkwgt_mvl +def vfmkwgt_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.gt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.gt intrisic=vfmkwgt_mvml +def vfmkwgt_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.lt intrisic=vfmkwlt_mvl +def vfmkwlt_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.lt intrisic=vfmkwlt_mvml +def vfmkwlt_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.lt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.ne intrisic=vfmkwne_mvl +def vfmkwne_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.ne intrisic=vfmkwne_mvml +def vfmkwne_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.eq intrisic=vfmkweq_mvl +def vfmkweq_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.eq $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.eq intrisic=vfmkweq_mvml +def vfmkweq_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.ge intrisic=vfmkwge_mvl +def vfmkwge_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.ge intrisic=vfmkwge_mvml +def vfmkwge_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.le intrisic=vfmkwle_mvl +def vfmkwle_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.le intrisic=vfmkwle_mvml +def vfmkwle_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.le $vmx,$vz,$vm", [], NoItinerary> +{ 
+ let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.num intrisic=vfmkwnum_mvl +def vfmkwnum_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.num intrisic=vfmkwnum_mvml +def vfmkwnum_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.nan intrisic=vfmkwnan_mvl +def vfmkwnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.nan intrisic=vfmkwnan_mvml +def vfmkwnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.nan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.gtnan intrisic=vfmkwgtnan_mvl +def vfmkwgtnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.gtnan intrisic=vfmkwgtnan_mvml +def vfmkwgtnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.ltnan intrisic=vfmkwltnan_mvl +def vfmkwltnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.ltnan intrisic=vfmkwltnan_mvml +def vfmkwltnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.nenan intrisic=vfmkwnenan_mvl +def vfmkwnenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.nenan intrisic=vfmkwnenan_mvml +def vfmkwnenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.eqnan intrisic=vfmkweqnan_mvl +def vfmkweqnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.eqnan intrisic=vfmkweqnan_mvml +def vfmkweqnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.eqnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.genan intrisic=vfmkwgenan_mvl +def vfmkwgenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.genan intrisic=vfmkwgenan_mvml +def vfmkwgenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.lenan intrisic=vfmkwlenan_mvl +def vfmkwlenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.w.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=vfmk.w.lenan intrisic=vfmkwlenan_mvml +def vfmkwlenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.w.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.gt intrisic=pvfmkwlogt_mvl +def pvfmkwlogt_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.gt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.gt intrisic=pvfmkwupgt_mvl +def pvfmkwupgt_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.gt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.gt intrisic=pvfmkwlogt_mvml +def pvfmkwlogt_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.gt intrisic=pvfmkwupgt_mvml +def pvfmkwupgt_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.lt intrisic=pvfmkwlolt_mvl +def pvfmkwlolt_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.lt intrisic=pvfmkwuplt_mvl +def pvfmkwuplt_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.lt intrisic=pvfmkwlolt_mvml +def pvfmkwlolt_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.lt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.lt intrisic=pvfmkwuplt_mvml +def pvfmkwuplt_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.lt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.ne intrisic=pvfmkwlone_mvl +def pvfmkwlone_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.ne intrisic=pvfmkwupne_mvl +def pvfmkwupne_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = 
"VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.ne intrisic=pvfmkwlone_mvml +def pvfmkwlone_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.ne intrisic=pvfmkwupne_mvml +def pvfmkwupne_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.eq intrisic=pvfmkwloeq_mvl +def pvfmkwloeq_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.eq $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.eq intrisic=pvfmkwupeq_mvl +def pvfmkwupeq_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.eq $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.eq intrisic=pvfmkwloeq_mvml +def pvfmkwloeq_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.eq intrisic=pvfmkwupeq_mvml +def pvfmkwupeq_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.ge intrisic=pvfmkwloge_mvl +def pvfmkwloge_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.ge intrisic=pvfmkwupge_mvl +def pvfmkwupge_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.ge intrisic=pvfmkwloge_mvml +def pvfmkwloge_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.ge intrisic=pvfmkwupge_mvml +def pvfmkwupge_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.le intrisic=pvfmkwlole_mvl +def pvfmkwlole_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.le intrisic=pvfmkwuple_mvl +def pvfmkwuple_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.le intrisic=pvfmkwlole_mvml +def pvfmkwlole_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.le $vmx,$vz,$vm", [], NoItinerary> 
+{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.le intrisic=pvfmkwuple_mvml +def pvfmkwuple_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.le $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.num intrisic=pvfmkwlonum_mvl +def pvfmkwlonum_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.num intrisic=pvfmkwupnum_mvl +def pvfmkwupnum_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.num intrisic=pvfmkwlonum_mvml +def pvfmkwlonum_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.num intrisic=pvfmkwupnum_mvml +def pvfmkwupnum_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.nan intrisic=pvfmkwlonan_mvl +def pvfmkwlonan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.nan intrisic=pvfmkwupnan_mvl +def pvfmkwupnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.nan intrisic=pvfmkwlonan_mvml +def pvfmkwlonan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.nan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.nan intrisic=pvfmkwupnan_mvml +def pvfmkwupnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.nan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.gtnan intrisic=pvfmkwlogtnan_mvl +def pvfmkwlogtnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.gtnan intrisic=pvfmkwupgtnan_mvl +def pvfmkwupgtnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.gtnan intrisic=pvfmkwlogtnan_mvml +def pvfmkwlogtnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.gtnan intrisic=pvfmkwupgtnan_mvml +def pvfmkwupgtnan_mvml : 
RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.ltnan intrisic=pvfmkwloltnan_mvl +def pvfmkwloltnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.ltnan intrisic=pvfmkwupltnan_mvl +def pvfmkwupltnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.ltnan intrisic=pvfmkwloltnan_mvml +def pvfmkwloltnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.ltnan intrisic=pvfmkwupltnan_mvml +def pvfmkwupltnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.nenan intrisic=pvfmkwlonenan_mvl +def pvfmkwlonenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.nenan intrisic=pvfmkwupnenan_mvl +def pvfmkwupnenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.nenan intrisic=pvfmkwlonenan_mvml +def pvfmkwlonenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.nenan intrisic=pvfmkwupnenan_mvml +def pvfmkwupnenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.eqnan intrisic=pvfmkwloeqnan_mvl +def pvfmkwloeqnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.eqnan intrisic=pvfmkwupeqnan_mvl +def pvfmkwupeqnan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.eqnan intrisic=pvfmkwloeqnan_mvml +def pvfmkwloeqnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.eqnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.eqnan intrisic=pvfmkwupeqnan_mvml +def pvfmkwupeqnan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.eqnan $vmx,$vz,$vm", [], NoItinerary> 
+{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.genan intrisic=pvfmkwlogenan_mvl +def pvfmkwlogenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.genan intrisic=pvfmkwupgenan_mvl +def pvfmkwupgenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.genan intrisic=pvfmkwlogenan_mvml +def pvfmkwlogenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.genan intrisic=pvfmkwupgenan_mvml +def pvfmkwupgenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.lenan intrisic=pvfmkwlolenan_mvl +def pvfmkwlolenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.lo.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.lenan intrisic=pvfmkwuplenan_mvl +def pvfmkwuplenan_mvl : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.w.up.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lo.lenan intrisic=pvfmkwlolenan_mvml +def pvfmkwlolenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.lo.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.up.lenan intrisic=pvfmkwuplenan_mvml +def pvfmkwuplenan_mvml : RV<0xb5, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.w.up.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.gt intrisic=pvfmkwgt_Mvl +def pvfmkwgt_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.gt $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.gt intrisic=pvfmkwgt_MvMl +def pvfmkwgt_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.gt $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lt intrisic=pvfmkwlt_Mvl +def pvfmkwlt_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.lt $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lt intrisic=pvfmkwlt_MvMl +def pvfmkwlt_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.lt $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.ne intrisic=pvfmkwne_Mvl +def pvfmkwne_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.ne 
$vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.ne intrisic=pvfmkwne_MvMl +def pvfmkwne_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.ne $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.eq intrisic=pvfmkweq_Mvl +def pvfmkweq_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.eq $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.eq intrisic=pvfmkweq_MvMl +def pvfmkweq_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.eq $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.ge intrisic=pvfmkwge_Mvl +def pvfmkwge_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.ge $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.ge intrisic=pvfmkwge_MvMl +def pvfmkwge_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.ge $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.le intrisic=pvfmkwle_Mvl +def pvfmkwle_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.le $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.le intrisic=pvfmkwle_MvMl +def pvfmkwle_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.le $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.num intrisic=pvfmkwnum_Mvl +def pvfmkwnum_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.num $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.num intrisic=pvfmkwnum_MvMl +def pvfmkwnum_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.num $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.nan intrisic=pvfmkwnan_Mvl +def pvfmkwnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.nan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.nan intrisic=pvfmkwnan_MvMl +def pvfmkwnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.nan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.gtnan intrisic=pvfmkwgtnan_Mvl +def pvfmkwgtnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.gtnan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.gtnan intrisic=pvfmkwgtnan_MvMl +def pvfmkwgtnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.gtnan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.ltnan intrisic=pvfmkwltnan_Mvl 
+def pvfmkwltnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.ltnan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.ltnan intrisic=pvfmkwltnan_MvMl +def pvfmkwltnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.ltnan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.nenan intrisic=pvfmkwnenan_Mvl +def pvfmkwnenan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.nenan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.nenan intrisic=pvfmkwnenan_MvMl +def pvfmkwnenan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.nenan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.eqnan intrisic=pvfmkweqnan_Mvl +def pvfmkweqnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.eqnan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.eqnan intrisic=pvfmkweqnan_MvMl +def pvfmkweqnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.eqnan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.genan intrisic=pvfmkwgenan_Mvl +def pvfmkwgenan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.genan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.genan intrisic=pvfmkwgenan_MvMl +def pvfmkwgenan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.genan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lenan intrisic=pvfmkwlenan_Mvl +def pvfmkwlenan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.w.lenan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMS asm=pvfmk.w.lenan intrisic=pvfmkwlenan_MvMl +def pvfmkwlenan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.w.lenan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.gt intrisic=vfmkdgt_mvl +def vfmkdgt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.gt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.gt intrisic=vfmkdgt_mvml +def vfmkdgt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.lt intrisic=vfmkdlt_mvl +def vfmkdlt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.lt intrisic=vfmkdlt_mvml +def vfmkdlt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.lt 
$vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.ne intrisic=vfmkdne_mvl +def vfmkdne_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.ne intrisic=vfmkdne_mvml +def vfmkdne_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.eq intrisic=vfmkdeq_mvl +def vfmkdeq_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.eq $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.eq intrisic=vfmkdeq_mvml +def vfmkdeq_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.ge intrisic=vfmkdge_mvl +def vfmkdge_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.ge intrisic=vfmkdge_mvml +def vfmkdge_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.le intrisic=vfmkdle_mvl +def vfmkdle_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.le intrisic=vfmkdle_mvml +def vfmkdle_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.le $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.num intrisic=vfmkdnum_mvl +def vfmkdnum_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.num intrisic=vfmkdnum_mvml +def vfmkdnum_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.nan intrisic=vfmkdnan_mvl +def vfmkdnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.nan intrisic=vfmkdnan_mvml +def vfmkdnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.nan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.gtnan intrisic=vfmkdgtnan_mvl +def vfmkdgtnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// 
inst=VFMF asm=vfmk.d.gtnan intrisic=vfmkdgtnan_mvml +def vfmkdgtnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.ltnan intrisic=vfmkdltnan_mvl +def vfmkdltnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.ltnan intrisic=vfmkdltnan_mvml +def vfmkdltnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.nenan intrisic=vfmkdnenan_mvl +def vfmkdnenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.nenan intrisic=vfmkdnenan_mvml +def vfmkdnenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.eqnan intrisic=vfmkdeqnan_mvl +def vfmkdeqnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.eqnan intrisic=vfmkdeqnan_mvml +def vfmkdeqnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.eqnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.genan intrisic=vfmkdgenan_mvl +def vfmkdgenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.genan intrisic=vfmkdgenan_mvml +def vfmkdgenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.lenan intrisic=vfmkdlenan_mvl +def vfmkdlenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.d.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.d.lenan intrisic=vfmkdlenan_mvml +def vfmkdlenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.d.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.gt intrisic=vfmksgt_mvl +def vfmksgt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.gt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.gt intrisic=vfmksgt_mvml +def vfmksgt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF 
asm=vfmk.s.lt intrisic=vfmkslt_mvl +def vfmkslt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.lt intrisic=vfmkslt_mvml +def vfmkslt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.lt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.ne intrisic=vfmksne_mvl +def vfmksne_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.ne intrisic=vfmksne_mvml +def vfmksne_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.eq intrisic=vfmkseq_mvl +def vfmkseq_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.eq $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.eq intrisic=vfmkseq_mvml +def vfmkseq_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.ge intrisic=vfmksge_mvl +def vfmksge_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.ge intrisic=vfmksge_mvml +def vfmksge_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.le intrisic=vfmksle_mvl +def vfmksle_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.le intrisic=vfmksle_mvml +def vfmksle_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.le $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.num intrisic=vfmksnum_mvl +def vfmksnum_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.num intrisic=vfmksnum_mvml +def vfmksnum_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.nan intrisic=vfmksnan_mvl +def vfmksnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.nan intrisic=vfmksnan_mvml +def vfmksnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.nan $vmx,$vz,$vm", [], 
NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.gtnan intrisic=vfmksgtnan_mvl +def vfmksgtnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.gtnan intrisic=vfmksgtnan_mvml +def vfmksgtnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.ltnan intrisic=vfmksltnan_mvl +def vfmksltnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.ltnan intrisic=vfmksltnan_mvml +def vfmksltnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.nenan intrisic=vfmksnenan_mvl +def vfmksnenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.nenan intrisic=vfmksnenan_mvml +def vfmksnenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.eqnan intrisic=vfmkseqnan_mvl +def vfmkseqnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.eqnan intrisic=vfmkseqnan_mvml +def vfmkseqnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.eqnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.genan intrisic=vfmksgenan_mvl +def vfmksgenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.genan intrisic=vfmksgenan_mvml +def vfmksgenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.lenan intrisic=vfmkslenan_mvl +def vfmkslenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "vfmk.s.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=vfmk.s.lenan intrisic=vfmkslenan_mvml +def vfmkslenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "vfmk.s.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.gt intrisic=pvfmkslogt_mvl +def pvfmkslogt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.gt $vmx,$vz", [], 
NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.gt intrisic=pvfmksupgt_mvl +def pvfmksupgt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.gt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.gt intrisic=pvfmkslogt_mvml +def pvfmkslogt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.gt intrisic=pvfmksupgt_mvml +def pvfmksupgt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.gt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.lt intrisic=pvfmkslolt_mvl +def pvfmkslolt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.lt intrisic=pvfmksuplt_mvl +def pvfmksuplt_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.lt $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.lt intrisic=pvfmkslolt_mvml +def pvfmkslolt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.lt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.lt intrisic=pvfmksuplt_mvml +def pvfmksuplt_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.lt $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.ne intrisic=pvfmkslone_mvl +def pvfmkslone_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.ne intrisic=pvfmksupne_mvl +def pvfmksupne_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.ne $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.ne intrisic=pvfmkslone_mvml +def pvfmkslone_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.ne intrisic=pvfmksupne_mvml +def pvfmksupne_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.ne $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.eq intrisic=pvfmksloeq_mvl +def pvfmksloeq_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.eq $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.eq intrisic=pvfmksupeq_mvl +def pvfmksupeq_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.eq 
$vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.eq intrisic=pvfmksloeq_mvml +def pvfmksloeq_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.eq intrisic=pvfmksupeq_mvml +def pvfmksupeq_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.eq $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.ge intrisic=pvfmksloge_mvl +def pvfmksloge_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.ge intrisic=pvfmksupge_mvl +def pvfmksupge_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.ge $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.ge intrisic=pvfmksloge_mvml +def pvfmksloge_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.ge intrisic=pvfmksupge_mvml +def pvfmksupge_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.ge $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.le intrisic=pvfmkslole_mvl +def pvfmkslole_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.le intrisic=pvfmksuple_mvl +def pvfmksuple_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.le $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.le intrisic=pvfmkslole_mvml +def pvfmkslole_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.le $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.le intrisic=pvfmksuple_mvml +def pvfmksuple_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.le $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.num intrisic=pvfmkslonum_mvl +def pvfmkslonum_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.num intrisic=pvfmksupnum_mvl +def pvfmksupnum_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.num $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.num intrisic=pvfmkslonum_mvml +def pvfmkslonum_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, 
VM:$vm, I32:$vl), + "pvfmk.s.lo.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.num intrisic=pvfmksupnum_mvml +def pvfmksupnum_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.num $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.nan intrisic=pvfmkslonan_mvl +def pvfmkslonan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.nan intrisic=pvfmksupnan_mvl +def pvfmksupnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.nan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.nan intrisic=pvfmkslonan_mvml +def pvfmkslonan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.nan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.nan intrisic=pvfmksupnan_mvml +def pvfmksupnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.nan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.gtnan intrisic=pvfmkslogtnan_mvl +def pvfmkslogtnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.gtnan intrisic=pvfmksupgtnan_mvl +def pvfmksupgtnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.gtnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.gtnan intrisic=pvfmkslogtnan_mvml +def pvfmkslogtnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.gtnan intrisic=pvfmksupgtnan_mvml +def pvfmksupgtnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.gtnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.ltnan intrisic=pvfmksloltnan_mvl +def pvfmksloltnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.ltnan intrisic=pvfmksupltnan_mvl +def pvfmksupltnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.ltnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.ltnan intrisic=pvfmksloltnan_mvml +def pvfmksloltnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding 
= "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.ltnan intrisic=pvfmksupltnan_mvml +def pvfmksupltnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.ltnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.nenan intrisic=pvfmkslonenan_mvl +def pvfmkslonenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.nenan intrisic=pvfmksupnenan_mvl +def pvfmksupnenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.nenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.nenan intrisic=pvfmkslonenan_mvml +def pvfmkslonenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.nenan intrisic=pvfmksupnenan_mvml +def pvfmksupnenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.nenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.eqnan intrisic=pvfmksloeqnan_mvl +def pvfmksloeqnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.eqnan intrisic=pvfmksupeqnan_mvl +def pvfmksupeqnan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.eqnan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.eqnan intrisic=pvfmksloeqnan_mvml +def pvfmksloeqnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.eqnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.eqnan intrisic=pvfmksupeqnan_mvml +def pvfmksupeqnan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.eqnan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.genan intrisic=pvfmkslogenan_mvl +def pvfmkslogenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.genan intrisic=pvfmksupgenan_mvl +def pvfmksupgenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.genan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.genan intrisic=pvfmkslogenan_mvml +def pvfmkslogenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.genan intrisic=pvfmksupgenan_mvml +def pvfmksupgenan_mvml : 
RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.genan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.lenan intrisic=pvfmkslolenan_mvl +def pvfmkslolenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.lo.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.lenan intrisic=pvfmksuplenan_mvl +def pvfmksuplenan_mvl : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, I32:$vl), + "pvfmk.s.up.lenan $vmx,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lo.lenan intrisic=pvfmkslolenan_mvml +def pvfmkslolenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.lo.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.up.lenan intrisic=pvfmksuplenan_mvml +def pvfmksuplenan_mvml : RV<0xb6, (outs VM:$vmx), (ins V64:$vz, VM:$vm, I32:$vl), + "pvfmk.s.up.lenan $vmx,$vz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.gt intrisic=pvfmksgt_Mvl +def pvfmksgt_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.gt $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.gt intrisic=pvfmksgt_MvMl +def pvfmksgt_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.gt $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lt intrisic=pvfmkslt_Mvl +def pvfmkslt_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.lt $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lt intrisic=pvfmkslt_MvMl +def pvfmkslt_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.lt $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.ne intrisic=pvfmksne_Mvl +def pvfmksne_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.ne $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.ne intrisic=pvfmksne_MvMl +def pvfmksne_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.ne $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.eq intrisic=pvfmkseq_Mvl +def pvfmkseq_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.eq $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.eq intrisic=pvfmkseq_MvMl +def pvfmkseq_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.eq $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.ge intrisic=pvfmksge_Mvl +def pvfmksge_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.ge $vmx,$vz", 
[]> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.ge intrisic=pvfmksge_MvMl +def pvfmksge_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.ge $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.le intrisic=pvfmksle_Mvl +def pvfmksle_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.le $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.le intrisic=pvfmksle_MvMl +def pvfmksle_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.le $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.num intrisic=pvfmksnum_Mvl +def pvfmksnum_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.num $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.num intrisic=pvfmksnum_MvMl +def pvfmksnum_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.num $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.nan intrisic=pvfmksnan_Mvl +def pvfmksnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.nan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.nan intrisic=pvfmksnan_MvMl +def pvfmksnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.nan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.gtnan intrisic=pvfmksgtnan_Mvl +def pvfmksgtnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.gtnan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.gtnan intrisic=pvfmksgtnan_MvMl +def pvfmksgtnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.gtnan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.ltnan intrisic=pvfmksltnan_Mvl +def pvfmksltnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.ltnan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.ltnan intrisic=pvfmksltnan_MvMl +def pvfmksltnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.ltnan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.nenan intrisic=pvfmksnenan_Mvl +def pvfmksnenan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.nenan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.nenan intrisic=pvfmksnenan_MvMl +def pvfmksnenan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.nenan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF 
asm=pvfmk.s.eqnan intrisic=pvfmkseqnan_Mvl +def pvfmkseqnan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.eqnan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.eqnan intrisic=pvfmkseqnan_MvMl +def pvfmkseqnan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.eqnan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.genan intrisic=pvfmksgenan_Mvl +def pvfmksgenan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.genan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.genan intrisic=pvfmksgenan_MvMl +def pvfmksgenan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.genan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lenan intrisic=pvfmkslenan_Mvl +def pvfmkslenan_Mvl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, I32:$vl), + "# pvfmk.s.lenan $vmx,$vz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMF asm=pvfmk.s.lenan intrisic=pvfmkslenan_MvMl +def pvfmkslenan_MvMl : Pseudo<(outs VM512:$vmx), (ins V64:$vz, VM512:$vm, I32:$vl), + "# pvfmk.s.lenan $vmx,$vz,$vm", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUMS asm=vsum.w.sx intrisic=vsumwsx_vvl +def vsumwsx_vvl : RV<0xea, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vsum.w.sx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUMS asm=vsum.w.sx intrisic=vsumwsx_vvml +def vsumwsx_vvml : RV<0xea, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vsum.w.sx $vx,$vy,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUMS asm=vsum.w.zx intrisic=vsumwzx_vvl +def vsumwzx_vvl : RV<0xea, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vsum.w.zx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUMS asm=vsum.w.zx intrisic=vsumwzx_vvml +def vsumwzx_vvml : RV<0xea, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vsum.w.zx $vx,$vy,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUMX asm=vsum.l intrisic=vsuml_vvl +def vsuml_vvl : RV<0xaa, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vsum.l $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSUMX asm=vsum.l intrisic=vsuml_vvml +def vsuml_vvml : RV<0xaa, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vsum.l $vx,$vy,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSUM asm=vfsum.d intrisic=vfsumd_vvl +def vfsumd_vvl : RV<0xec, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfsum.d $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSUM asm=vfsum.d intrisic=vfsumd_vvml +def vfsumd_vvml : RV<0xec, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vfsum.d $vx,$vy,$vm", [], 
NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSUM asm=vfsum.s intrisic=vfsums_vvl +def vfsums_vvl : RV<0xec, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfsum.s $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFSUM asm=vfsum.s intrisic=vfsums_vvml +def vfsums_vvml : RV<0xec, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vfsum.s $vx,$vy,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.fst.sx intrisic=vrmaxswfstsx_vvl +def vrmaxswfstsx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmaxs.w.fst.sx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.fst.sx intrisic=vrmaxswfstsx_vvvl +def vrmaxswfstsx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmaxs.w.fst.sx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.lst.sx intrisic=vrmaxswlstsx_vvl +def vrmaxswlstsx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmaxs.w.lst.sx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.lst.sx intrisic=vrmaxswlstsx_vvvl +def vrmaxswlstsx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmaxs.w.lst.sx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.fst.zx intrisic=vrmaxswfstzx_vvl +def vrmaxswfstzx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmaxs.w.fst.zx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.fst.zx intrisic=vrmaxswfstzx_vvvl +def vrmaxswfstzx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmaxs.w.fst.zx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.lst.zx intrisic=vrmaxswlstzx_vvl +def vrmaxswlstzx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmaxs.w.lst.zx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmaxs.w.lst.zx intrisic=vrmaxswlstzx_vvvl +def vrmaxswlstzx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmaxs.w.lst.zx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.fst.sx intrisic=vrminswfstsx_vvl +def vrminswfstsx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmins.w.fst.sx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.fst.sx intrisic=vrminswfstsx_vvvl +def vrminswfstsx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmins.w.fst.sx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let 
isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.lst.sx intrisic=vrminswlstsx_vvl +def vrminswlstsx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmins.w.lst.sx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.lst.sx intrisic=vrminswlstsx_vvvl +def vrminswlstsx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmins.w.lst.sx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.fst.zx intrisic=vrminswfstzx_vvl +def vrminswfstzx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmins.w.fst.zx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.fst.zx intrisic=vrminswfstzx_vvvl +def vrminswfstzx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmins.w.fst.zx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.lst.zx intrisic=vrminswlstzx_vvl +def vrminswlstzx_vvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmins.w.lst.zx $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXS asm=vrmins.w.lst.zx intrisic=vrminswlstzx_vvvl +def vrminswlstzx_vvvl : RV<0xbb, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmins.w.lst.zx $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmaxs.l.fst intrisic=vrmaxslfst_vvl +def vrmaxslfst_vvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmaxs.l.fst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmaxs.l.fst intrisic=vrmaxslfst_vvvl +def vrmaxslfst_vvvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmaxs.l.fst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmaxs.l.lst intrisic=vrmaxsllst_vvl +def vrmaxsllst_vvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmaxs.l.lst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmaxs.l.lst intrisic=vrmaxsllst_vvvl +def vrmaxsllst_vvvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmaxs.l.lst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmins.l.fst intrisic=vrminslfst_vvl +def vrminslfst_vvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmins.l.fst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmins.l.fst intrisic=vrminslfst_vvvl +def vrminslfst_vvvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmins.l.fst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let 
DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmins.l.lst intrisic=vrminsllst_vvl +def vrminsllst_vvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrmins.l.lst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VMAXX asm=vrmins.l.lst intrisic=vrminsllst_vvvl +def vrminsllst_vvvl : RV<0xab, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vrmins.l.lst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.d.fst intrisic=vfrmaxdfst_vvl +def vfrmaxdfst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmax.d.fst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.d.fst intrisic=vfrmaxdfst_vvvl +def vfrmaxdfst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmax.d.fst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.d.lst intrisic=vfrmaxdlst_vvl +def vfrmaxdlst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmax.d.lst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.d.lst intrisic=vfrmaxdlst_vvvl +def vfrmaxdlst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmax.d.lst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.s.fst intrisic=vfrmaxsfst_vvl +def vfrmaxsfst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmax.s.fst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.s.fst intrisic=vfrmaxsfst_vvvl +def vfrmaxsfst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmax.s.fst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.s.lst intrisic=vfrmaxslst_vvl +def vfrmaxslst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmax.s.lst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmax.s.lst intrisic=vfrmaxslst_vvvl +def vfrmaxslst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmax.s.lst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.d.fst intrisic=vfrmindfst_vvl +def vfrmindfst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmin.d.fst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.d.fst intrisic=vfrmindfst_vvvl +def vfrmindfst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmin.d.fst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.d.lst intrisic=vfrmindlst_vvl +def 
vfrmindlst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmin.d.lst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.d.lst intrisic=vfrmindlst_vvvl +def vfrmindlst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmin.d.lst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.s.fst intrisic=vfrminsfst_vvl +def vfrminsfst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmin.s.fst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.s.fst intrisic=vfrminsfst_vvvl +def vfrminsfst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmin.s.fst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.s.lst intrisic=vfrminslst_vvl +def vfrminslst_vvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vfrmin.s.lst $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VFMAX asm=vfrmin.s.lst intrisic=vfrminslst_vvvl +def vfrminslst_vvvl : RV<0xad, (outs V64:$vx), (ins V64:$vy, V64:$vd, I32:$vl), + "vfrmin.s.lst $vx,$vy", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRAND asm=vrand intrisic=vrand_vvl +def vrand_vvl : RV<0x88, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrand $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRAND asm=vrand intrisic=vrand_vvml +def vrand_vvml : RV<0x88, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vrand $vx,$vy,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VROR asm=vror intrisic=vror_vvl +def vror_vvl : RV<0x98, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vror $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VROR asm=vror intrisic=vror_vvml +def vror_vvml : RV<0x98, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vror $vx,$vy,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRXOR asm=vrxor intrisic=vrxor_vvl +def vrxor_vvl : RV<0x89, (outs V64:$vx), (ins V64:$vy, I32:$vl), + "vrxor $vx,$vy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VRXOR asm=vrxor intrisic=vrxor_vvml +def vrxor_vvml : RV<0x89, (outs V64:$vx), (ins V64:$vy, VM:$vm, I32:$vl), + "vrxor $vx,$vy,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssl +def vgt_vvssl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgt $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssvl +def vgt_vvssvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgt 
$vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssl +def vgt_vvsZl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgt $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssvl +def vgt_vvsZvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgt $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssl +def vgt_vvIsl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgt $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssvl +def vgt_vvIsvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgt $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssl +def vgt_vvIZl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgt $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssvl +def vgt_vvIZvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgt $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssml +def vgt_vvssml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgt $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssmvl +def vgt_vvssmvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgt $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssml +def vgt_vvsZml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgt $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssmvl +def vgt_vvsZmvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgt $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssml +def vgt_vvIsml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgt $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssmvl +def vgt_vvIsmvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgt 
$vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssml +def vgt_vvIZml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgt $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt intrisic=vgt_vvssmvl +def vgt_vvIZmvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgt $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssl +def vgtnc_vvssl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgt.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssvl +def vgtnc_vvssvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssl +def vgtnc_vvsZl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgt.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssvl +def vgtnc_vvsZvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssl +def vgtnc_vvIsl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgt.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssvl +def vgtnc_vvIsvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssl +def vgtnc_vvIZl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgt.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssvl +def vgtnc_vvIZvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssml +def vgtnc_vvssml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgt.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssmvl +def vgtnc_vvssmvl : RV<0xa1, (outs V64:$vx), (ins 
V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssml +def vgtnc_vvsZml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgt.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssmvl +def vgtnc_vvsZmvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssml +def vgtnc_vvIsml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgt.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssmvl +def vgtnc_vvIsmvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssml +def vgtnc_vvIZml : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgt.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGT asm=vgt.nc intrisic=vgtnc_vvssmvl +def vgtnc_vvIZmvl : RV<0xa1, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgt.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssl +def vgtu_vvssl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgtu $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssvl +def vgtu_vvssvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssl +def vgtu_vvsZl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgtu $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssvl +def vgtu_vvsZvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssl +def vgtu_vvIsl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgtu $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// 
inst=VGTU asm=vgtu intrisic=vgtu_vvssvl +def vgtu_vvIsvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssl +def vgtu_vvIZl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgtu $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssvl +def vgtu_vvIZvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssml +def vgtu_vvssml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgtu $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssmvl +def vgtu_vvssmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssml +def vgtu_vvsZml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtu $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssmvl +def vgtu_vvsZmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssml +def vgtu_vvIsml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgtu $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssmvl +def vgtu_vvIsmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssml +def vgtu_vvIZml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtu $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu intrisic=vgtu_vvssmvl +def vgtu_vvIZmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtu $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssl +def vgtunc_vvssl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let 
DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssvl +def vgtunc_vvssvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssl +def vgtunc_vvsZl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssvl +def vgtunc_vvsZvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssl +def vgtunc_vvIsl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgtu.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssvl +def vgtunc_vvIsvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssl +def vgtunc_vvIZl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgtu.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssvl +def vgtunc_vvIZvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssml +def vgtunc_vvssml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssmvl +def vgtunc_vvssmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssml +def vgtunc_vvsZml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssmvl +def vgtunc_vvsZmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssml +def 
vgtunc_vvIsml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgtu.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssmvl +def vgtunc_vvIsmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssml +def vgtunc_vvIZml : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtu.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTU asm=vgtu.nc intrisic=vgtunc_vvssmvl +def vgtunc_vvIZmvl : RV<0xa2, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtu.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssl +def vgtlsx_vvssl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssvl +def vgtlsx_vvssvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssl +def vgtlsx_vvsZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssvl +def vgtlsx_vvsZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssl +def vgtlsx_vvIsl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgtl.sx $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssvl +def vgtlsx_vvIsvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssl +def vgtlsx_vvIZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgtl.sx $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssvl +def vgtlsx_vvIZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + 
let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssml +def vgtlsx_vvssml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssmvl +def vgtlsx_vvssmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssml +def vgtlsx_vvsZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssmvl +def vgtlsx_vvsZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssml +def vgtlsx_vvIsml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgtl.sx $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssmvl +def vgtlsx_vvIsmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssml +def vgtlsx_vvIZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.sx $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx intrisic=vgtlsx_vvssmvl +def vgtlsx_vvIZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssl +def vgtlsxnc_vvssl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssvl +def vgtlsxnc_vvssvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssl +def vgtlsxnc_vvsZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL 
asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssvl +def vgtlsxnc_vvsZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssl +def vgtlsxnc_vvIsl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssvl +def vgtlsxnc_vvIsvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssl +def vgtlsxnc_vvIZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssvl +def vgtlsxnc_vvIZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssml +def vgtlsxnc_vvssml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssmvl +def vgtlsxnc_vvssmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssml +def vgtlsxnc_vvsZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssmvl +def vgtlsxnc_vvsZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssml +def vgtlsxnc_vvIsml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssmvl +def vgtlsxnc_vvIsmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL 
asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssml +def vgtlsxnc_vvIZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.sx.nc intrisic=vgtlsxnc_vvssmvl +def vgtlsxnc_vvIZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.sx.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssl +def vgtlzx_vvssl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssvl +def vgtlzx_vvssvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssl +def vgtlzx_vvsZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssvl +def vgtlzx_vvsZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssl +def vgtlzx_vvIsl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgtl.zx $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssvl +def vgtlzx_vvIsvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssl +def vgtlzx_vvIZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgtl.zx $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssvl +def vgtlzx_vvIZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssml +def vgtlzx_vvssml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssmvl +def vgtlzx_vvssmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$sz,$vm", 
[], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssml +def vgtlzx_vvsZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssmvl +def vgtlzx_vvsZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssml +def vgtlzx_vvIsml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgtl.zx $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssmvl +def vgtlzx_vvIsmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssml +def vgtlzx_vvIZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.zx $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx intrisic=vgtlzx_vvssmvl +def vgtlzx_vvIZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssl +def vgtlzxnc_vvssl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssvl +def vgtlzxnc_vvssvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssl +def vgtlzxnc_vvsZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssvl +def vgtlzxnc_vvsZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssl +def vgtlzxnc_vvIsl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; 
+ let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssvl +def vgtlzxnc_vvIsvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssl +def vgtlzxnc_vvIZl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssvl +def vgtlzxnc_vvIZvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssml +def vgtlzxnc_vvssml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssmvl +def vgtlzxnc_vvssmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssml +def vgtlzxnc_vvsZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssmvl +def vgtlzxnc_vvsZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssml +def vgtlzxnc_vvIsml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssmvl +def vgtlzxnc_vvIsmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssml +def vgtlzxnc_vvIZml : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VGTL asm=vgtl.zx.nc intrisic=vgtlzxnc_vvssmvl +def vgtlzxnc_vvIZmvl : RV<0xa3, (outs V64:$vx), (ins V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, V64:$vd, I32:$vl), + "vgtl.zx.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = 
"VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssl +def vsc_vvssl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vsc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssl +def vsc_vvsZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vsc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssl +def vsc_vvIsl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vsc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssl +def vsc_vvIZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vsc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssml +def vsc_vvssml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vsc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssml +def vsc_vvsZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssml +def vsc_vvIsml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vsc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc intrisic=vsc_vvssml +def vsc_vvIZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssl +def vscnc_vvssl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vsc.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssl +def vscnc_vvsZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vsc.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssl +def vscnc_vvIsl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vsc.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssl +def vscnc_vvIZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vsc.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssml +def vscnc_vvssml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vsc.nc $vx,$vy,$sy,$sz,$vm", 
[], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssml +def vscnc_vvsZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssml +def vscnc_vvIsml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vsc.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc intrisic=vscnc_vvssml +def vscnc_vvIZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssl +def vscot_vvssl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vsc.ot $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssl +def vscot_vvsZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vsc.ot $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssl +def vscot_vvIsl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vsc.ot $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssl +def vscot_vvIZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vsc.ot $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssml +def vscot_vvssml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vsc.ot $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssml +def vscot_vvsZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc.ot $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssml +def vscot_vvIsml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vsc.ot $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.ot intrisic=vscot_vvssml +def vscot_vvIZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc.ot $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssl +def vscncot_vvssl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vsc.nc.ot $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} 
+ +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssl +def vscncot_vvsZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vsc.nc.ot $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssl +def vscncot_vvIsl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vsc.nc.ot $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssl +def vscncot_vvIZl : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vsc.nc.ot $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssml +def vscncot_vvssml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vsc.nc.ot $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssml +def vscncot_vvsZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc.nc.ot $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssml +def vscncot_vvIsml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vsc.nc.ot $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSC asm=vsc.nc.ot intrisic=vscncot_vvssml +def vscncot_vvIZml : RV<0xb1, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vsc.nc.ot $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssl +def vscu_vvssl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscu $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssl +def vscu_vvsZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscu $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssl +def vscu_vvIsl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscu $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssl +def vscu_vvIZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscu $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssml +def vscu_vvssml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscu $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssml +def vscu_vvsZml : RV<0xb2, (outs ), (ins V64:$vx, 
V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssml +def vscu_vvIsml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscu $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu intrisic=vscu_vvssml +def vscu_vvIZml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssl +def vscunc_vvssl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscu.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssl +def vscunc_vvsZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscu.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssl +def vscunc_vvIsl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscu.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssl +def vscunc_vvIZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscu.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssml +def vscunc_vvssml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscu.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssml +def vscunc_vvsZml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssml +def vscunc_vvIsml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscu.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc intrisic=vscunc_vvssml +def vscunc_vvIZml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssl +def vscuot_vvssl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscu.ot $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssl +def vscuot_vvsZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscu.ot $vx,$vy,$sy,$Z", [], 
NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssl +def vscuot_vvIsl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscu.ot $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssl +def vscuot_vvIZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscu.ot $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssml +def vscuot_vvssml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscu.ot $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssml +def vscuot_vvsZml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu.ot $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssml +def vscuot_vvIsml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscu.ot $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.ot intrisic=vscuot_vvssml +def vscuot_vvIZml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu.ot $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssl +def vscuncot_vvssl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscu.nc.ot $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssl +def vscuncot_vvsZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscu.nc.ot $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssl +def vscuncot_vvIsl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscu.nc.ot $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssl +def vscuncot_vvIZl : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscu.nc.ot $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssml +def vscuncot_vvssml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscu.nc.ot $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssml +def vscuncot_vvsZml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu.nc.ot $vx,$vy,$sy,$Z,$vm", [], NoItinerary> 
+{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssml +def vscuncot_vvIsml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscu.nc.ot $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCU asm=vscu.nc.ot intrisic=vscuncot_vvssml +def vscuncot_vvIZml : RV<0xb2, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscu.nc.ot $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssl +def vscl_vvssl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscl $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssl +def vscl_vvsZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscl $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssl +def vscl_vvIsl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscl $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssl +def vscl_vvIZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscl $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssml +def vscl_vvssml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscl $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssml +def vscl_vvsZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssml +def vscl_vvIsml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscl $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl intrisic=vscl_vvssml +def vscl_vvIZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc intrisic=vsclnc_vvssl +def vsclnc_vvssl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscl.nc $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc intrisic=vsclnc_vvssl +def vsclnc_vvsZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscl.nc $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc 
intrisic=vsclnc_vvssl +def vsclnc_vvIsl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscl.nc $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc intrisic=vsclnc_vvssl +def vsclnc_vvIZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscl.nc $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc intrisic=vsclnc_vvssml +def vsclnc_vvssml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscl.nc $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc intrisic=vsclnc_vvssml +def vsclnc_vvsZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl.nc $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc intrisic=vsclnc_vvssml +def vsclnc_vvIsml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscl.nc $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc intrisic=vsclnc_vvssml +def vsclnc_vvIZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl.nc $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssl +def vsclot_vvssl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscl.ot $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssl +def vsclot_vvsZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscl.ot $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssl +def vsclot_vvIsl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscl.ot $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssl +def vsclot_vvIZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscl.ot $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssml +def vsclot_vvssml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscl.ot $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssml +def vsclot_vvsZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl.ot $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssml +def vsclot_vvIsml : RV<0xb3, (outs ), (ins 
V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscl.ot $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.ot intrisic=vsclot_vvssml +def vsclot_vvIZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl.ot $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssl +def vsclncot_vvssl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, I32:$vl), + "vscl.nc.ot $vx,$vy,$sy,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssl +def vsclncot_vvsZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, I32:$vl), + "vscl.nc.ot $vx,$vy,$sy,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssl +def vsclncot_vvIsl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, I32:$vl), + "vscl.nc.ot $vx,$vy,$I,$sz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssl +def vsclncot_vvIZl : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, I32:$vl), + "vscl.nc.ot $vx,$vy,$I,$Z", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssml +def vsclncot_vvssml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, I64:$sz, VM:$vm, I32:$vl), + "vscl.nc.ot $vx,$vy,$sy,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssml +def vsclncot_vvsZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, I64:$sy, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl.nc.ot $vx,$vy,$sy,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssml +def vsclncot_vvIsml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, I64:$sz, VM:$vm, I32:$vl), + "vscl.nc.ot $vx,$vy,$I,$sz,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=VSCL asm=vscl.nc.ot intrisic=vsclncot_vvssml +def vsclncot_vvIZml : RV<0xb3, (outs ), (ins V64:$vx, V64:$vy, simm7Op64:$I, simm7Op64:$Z, VM:$vm, I32:$vl), + "vscl.nc.ot $vx,$vy,$I,$Z,$vm", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=ANDM asm=andm intrisic=andm_mmm +def andm_mmm : RV<0x84, (outs VM:$vmx), (ins VM:$vmy, VM:$vmz), + "andm $vmx,$vmy,$vmz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=ANDM asm=andm intrisic=andm_MMM +def andm_MMM : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz), + "# andm $vmx,$vmy,$vmz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=ORM asm=orm intrisic=orm_mmm +def orm_mmm : RV<0x85, (outs VM:$vmx), (ins VM:$vmy, VM:$vmz), + "orm $vmx,$vmy,$vmz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=ORM 
asm=orm intrisic=orm_MMM +def orm_MMM : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz), + "# orm $vmx,$vmy,$vmz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=XORM asm=xorm intrisic=xorm_mmm +def xorm_mmm : RV<0x86, (outs VM:$vmx), (ins VM:$vmy, VM:$vmz), + "xorm $vmx,$vmy,$vmz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=XORM asm=xorm intrisic=xorm_MMM +def xorm_MMM : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz), + "# xorm $vmx,$vmy,$vmz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=EQVM asm=eqvm intrisic=eqvm_mmm +def eqvm_mmm : RV<0x87, (outs VM:$vmx), (ins VM:$vmy, VM:$vmz), + "eqvm $vmx,$vmy,$vmz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=EQVM asm=eqvm intrisic=eqvm_MMM +def eqvm_MMM : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz), + "# eqvm $vmx,$vmy,$vmz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=NNDM asm=nndm intrisic=nndm_mmm +def nndm_mmm : RV<0x94, (outs VM:$vmx), (ins VM:$vmy, VM:$vmz), + "nndm $vmx,$vmy,$vmz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=NNDM asm=nndm intrisic=nndm_MMM +def nndm_MMM : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz), + "# nndm $vmx,$vmy,$vmz", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=NEGM asm=negm intrisic=negm_mm +def negm_mm : RV<0x95, (outs VM:$vmx), (ins VM:$vmy), + "negm $vmx,$vmy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=NEGM asm=negm intrisic=negm_MM +def negm_MM : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy), + "# negm $vmx,$vmy", []> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=PCVM asm=pcvm intrisic=pcvm_sml +def pcvm_sml : RV<0xa4, (outs I64:$sx), (ins VM:$vmy, I32:$vl), + "pcvm $sx,$vmy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=LZVM asm=lzvm intrisic=lzvm_sml +def lzvm_sml : RV<0xa5, (outs I64:$sx), (ins VM:$vmy, I32:$vl), + "lzvm $sx,$vmy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + +// inst=TOVM asm=tovm intrisic=tovm_sml +def tovm_sml : RV<0xa6, (outs I64:$sx), (ins VM:$vmy, I32:$vl), + "tovm $sx,$vmy", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + diff --git a/llvm/lib/Target/VE/VEInstrVecVL.td b/llvm/lib/Target/VE/VEInstrVecVL.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEInstrVecVL.td @@ -0,0 +1,52 @@ +// LVS lvs lvsl_svs +def lvsl_svs : RV<0x9e, (outs I64:$sx), (ins V64:$vx, I32:$sy), + "lvs $sx,$vx($sy)", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +def lvsl_svI : RV<0x9e, (outs I64:$sx), (ins V64:$vx, simm7Op32:$sy), + "lvs $sx,$vx($sy)", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + + +// LVS lvs lvss_svs +def lvss_svs : RV<0x9e, (outs F32:$sx), (ins V64:$vx, I32:$sy), + "lvs $sx,$vx($sy)", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// inst=LSV asm=lsv intrisic=lsv_vvss +def lsv_vvss : RV<0x8e, (outs V64:$vx), (ins V64:$vd, I32:$sy, I64:$sz), + "lsv $vx($sy),$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +def 
lsv_vvIs : RV<0x8e, (outs V64:$vx), (ins V64:$vd, simm7Op32:$sy, I64:$sz), + "lsv $vx($sy),$sz", [], NoItinerary> +{ + let Constraints = "$vx = $vd"; + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; +} + +// temporary +def vor_v1vl : RV<0xc5, (outs V64:$vx), (ins uimm6Op64:$sy, V64:$vz, I32:$vl), + "vor $vx,($sy)1,$vz", [], NoItinerary> +{ + let DecoderNamespace = "VEL"; + let isCodeGenOnly = 1; + let DisableEncoding = "$vl"; +} + + +include "VEInstrVecVL.gen.td" diff --git a/llvm/lib/Target/VE/VEMCInstLower.cpp b/llvm/lib/Target/VE/VEMCInstLower.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEMCInstLower.cpp @@ -0,0 +1,116 @@ +//===-- VEMCInstLower.cpp - Convert VE MachineInstr to MCInst -------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains code to lower VE MachineInstrs to their corresponding +// MCInst records. +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/VEMCExpr.h" +#include "VE.h" +#include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/IR/Mangler.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" + +using namespace llvm; + +static MCOperand LowerSymbolOperand(const MachineInstr *MI, + const MachineOperand &MO, + const MCSymbol *Symbol, AsmPrinter &AP) { + + VEMCExpr::VariantKind Kind = (VEMCExpr::VariantKind)MO.getTargetFlags(); + + const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::create(Symbol, AP.OutContext); + const VEMCExpr *expr = VEMCExpr::create(Kind, MCSym, AP.OutContext); + return MCOperand::createExpr(expr); +} + +static MCOperand LowerOperand(const MachineInstr *MI, const MachineOperand &MO, + AsmPrinter &AP) { + switch (MO.getType()) { + default: + report_fatal_error("unknown operand type"); + break; + case MachineOperand::MO_CImmediate: + report_fatal_error("unsupported MO_CImmediate operand type"); + break; + case MachineOperand::MO_FPImmediate: + report_fatal_error("unsupported MO_FPImmediate operand type"); + break; + case MachineOperand::MO_FrameIndex: + report_fatal_error("unsupported MO_FrameIndex operand type"); + break; + case MachineOperand::MO_TargetIndex: + report_fatal_error("unsupported MO_TargetIndex operand type"); + break; + case MachineOperand::MO_JumpTableIndex: + return LowerSymbolOperand(MI, MO, AP.GetJTISymbol(MO.getIndex()), AP); + case MachineOperand::MO_RegisterLiveOut: + report_fatal_error("unsupported MO_RegisterLiveOut operand type"); + break; + case MachineOperand::MO_Metadata: + report_fatal_error("unsupported MO_Metadata operand type"); + break; + case MachineOperand::MO_MCSymbol: + return LowerSymbolOperand(MI, MO, MO.getMCSymbol(), AP); + break; + case MachineOperand::MO_CFIIndex: + report_fatal_error("unsupported MO_CFIIndex operand type"); + break; + case MachineOperand::MO_IntrinsicID: + report_fatal_error("unsupported MO_IntrinsicID operand type"); + break; + case MachineOperand::MO_Predicate: + report_fatal_error("unsupported MO_Predicate operand type"); + break; + + case MachineOperand::MO_Register: + if (MO.isImplicit()) + break; + return
MCOperand::createReg(MO.getReg()); + + case MachineOperand::MO_Immediate: + return MCOperand::createImm(MO.getImm()); + + case MachineOperand::MO_MachineBasicBlock: + return LowerSymbolOperand(MI, MO, MO.getMBB()->getSymbol(), AP); + case MachineOperand::MO_GlobalAddress: + return LowerSymbolOperand(MI, MO, AP.getSymbol(MO.getGlobal()), AP); + case MachineOperand::MO_BlockAddress: + return LowerSymbolOperand( + MI, MO, AP.GetBlockAddressSymbol(MO.getBlockAddress()), AP); + case MachineOperand::MO_ExternalSymbol: + return LowerSymbolOperand( + MI, MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP); + case MachineOperand::MO_ConstantPoolIndex: + return LowerSymbolOperand(MI, MO, AP.GetCPISymbol(MO.getIndex()), AP); + + case MachineOperand::MO_RegisterMask: + break; + } + return MCOperand(); +} + +void llvm::LowerVEMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI, + AsmPrinter &AP) { + OutMI.setOpcode(MI->getOpcode()); + + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + MCOperand MCOp = LowerOperand(MI, MO, AP); + + if (MCOp.isValid()) + OutMI.addOperand(MCOp); + } +} diff --git a/llvm/lib/Target/VE/VEMachineFunctionInfo.h b/llvm/lib/Target/VE/VEMachineFunctionInfo.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEMachineFunctionInfo.h @@ -0,0 +1,63 @@ +//===- VEMachineFunctionInfo.h - VE Machine Function Info -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares VE specific per-machine-function information. +// +//===----------------------------------------------------------------------===// +#ifndef LLVM_LIB_TARGET_VE_VEMACHINEFUNCTIONINFO_H +#define LLVM_LIB_TARGET_VE_VEMACHINEFUNCTIONINFO_H + +#include "llvm/CodeGen/MachineFunction.h" + +namespace llvm { + +class VEMachineFunctionInfo : public MachineFunctionInfo { + virtual void anchor(); + +private: + unsigned GlobalBaseReg; + + /// VectorLengthReg - Holds the virtual register for VL register. + unsigned VectorLengthReg; + + /// VarArgsFrameOffset - Frame offset to start of varargs area. + int VarArgsFrameOffset; + + /// SRetReturnReg - Holds the virtual register into which the sret + /// argument is passed. + unsigned SRetReturnReg; + + /// IsLeafProc - True if the function is a leaf procedure. 
+ bool IsLeafProc; + +public: + VEMachineFunctionInfo() + : GlobalBaseReg(0), VectorLengthReg(0), VarArgsFrameOffset(0), + SRetReturnReg(0), IsLeafProc(false) {} + explicit VEMachineFunctionInfo(MachineFunction &MF) + : GlobalBaseReg(0), VectorLengthReg(0), VarArgsFrameOffset(0), + SRetReturnReg(0), IsLeafProc(false) {} + + unsigned getGlobalBaseReg() const { return GlobalBaseReg; } + void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; } + + unsigned getVectorLengthReg() const { return VectorLengthReg; } + void setVectorLengthReg(unsigned Reg) { VectorLengthReg = Reg; } + + int getVarArgsFrameOffset() const { return VarArgsFrameOffset; } + void setVarArgsFrameOffset(int Offset) { VarArgsFrameOffset = Offset; } + + unsigned getSRetReturnReg() const { return SRetReturnReg; } + void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } + + void setLeafProc(bool rhs) { IsLeafProc = rhs; } + bool isLeafProc() const { return IsLeafProc; } +}; +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/VEMachineFunctionInfo.cpp b/llvm/lib/Target/VE/VEMachineFunctionInfo.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VEMachineFunctionInfo.cpp @@ -0,0 +1,13 @@ +//===-- VEMachineFunctionInfo.cpp - VE Machine Function Info --------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "VEMachineFunctionInfo.h" + +using namespace llvm; + +void VEMachineFunctionInfo::anchor() {} diff --git a/llvm/lib/Target/VE/VERegisterInfo.h b/llvm/lib/Target/VE/VERegisterInfo.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VERegisterInfo.h @@ -0,0 +1,59 @@ +//===-- VERegisterInfo.h - VE Register Information Impl ---------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the VE implementation of the TargetRegisterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VEREGISTERINFO_H +#define LLVM_LIB_TARGET_VE_VEREGISTERINFO_H + +#include "llvm/CodeGen/TargetRegisterInfo.h" + +#define GET_REGINFO_HEADER +#include "VEGenRegisterInfo.inc" + +namespace llvm { +struct VERegisterInfo : public VEGenRegisterInfo { +private: + // VLS register class's Pressure Set ID. + unsigned VLSPSetID; + +public: + VERegisterInfo(); + + /// Code Generation virtual methods... 
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override; + const uint32_t *getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID CC) const override; + const uint32_t *getNoPreservedMask() const override; + + BitVector getReservedRegs(const MachineFunction &MF) const override; + bool isConstantPhysReg(unsigned PhysReg) const override; + + const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF, + unsigned Kind) const override; + + bool requiresRegisterScavenging(const MachineFunction &MF) const override; + bool requiresFrameIndexScavenging(const MachineFunction &MF) const override; + + void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, + unsigned FIOperandNum, + RegScavenger *RS = nullptr) const override; + + Register getFrameRegister(const MachineFunction &MF) const override; + + bool canRealignStack(const MachineFunction &MF) const override; + + unsigned getRegPressureSetLimit(const MachineFunction &MF, + unsigned Idx) const override; +}; + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/VERegisterInfo.cpp b/llvm/lib/Target/VE/VERegisterInfo.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VERegisterInfo.cpp @@ -0,0 +1,488 @@ +//===-- VERegisterInfo.cpp - VE Register Information ----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the VE implementation of the TargetRegisterInfo class. +// +//===----------------------------------------------------------------------===// + +#include "VERegisterInfo.h" +#include "VE.h" +#include "VEMachineFunctionInfo.h" +#include "VESubtarget.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace llvm; + +#define GET_REGINFO_TARGET_DESC +#include "VEGenRegisterInfo.inc" + +// VE uses %s10 == %lp to keep return address +VERegisterInfo::VERegisterInfo() : VEGenRegisterInfo(VE::SX10) {} + +bool VERegisterInfo::requiresRegisterScavenging( + const MachineFunction &MF) const { + return true; +} + +bool VERegisterInfo::requiresFrameIndexScavenging( + const MachineFunction &MF) const { + return true; +} + +const MCPhysReg * +VERegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { + const Function &F = MF->getFunction(); + CallingConv::ID CC = F.getCallingConv(); + + switch (CC) { + case CallingConv::X86_RegCall: + return CSR_RegCall_SaveList; + default: + return CSR_SaveList; + } +} + +const uint32_t *VERegisterInfo::getCallPreservedMask(const MachineFunction &MF, + CallingConv::ID CC) const { + switch (CC) { + case CallingConv::X86_RegCall: + return CSR_RegCall_RegMask; + case CallingConv::VE_VEC_EXPF: + return CSR_vec_expf_RegMask; + case CallingConv::VE_LLVM_GROW_STACK: + return CSR_llvm_grow_stack_RegMask; + default: + return CSR_RegMask; + } +} + +const uint32_t *VERegisterInfo::getNoPreservedMask() const { + return CSR_NoRegs_RegMask; +} + +BitVector VERegisterInfo::getReservedRegs(const 
MachineFunction &MF) const { + BitVector Reserved(getNumRegs()); + Reserved.set(VE::SX8); // stack limit + Reserved.set(VE::SX9); // frame pointer + Reserved.set(VE::SX10); // link register (return address) + Reserved.set(VE::SX11); // stack pointer + + // FIXME: maybe not need to be reserved + Reserved.set(VE::SX12); // outer register + Reserved.set(VE::SX13); // id register for dynamic linker + + Reserved.set(VE::SX14); // thread pointer + Reserved.set(VE::SX15); // global offset table register + Reserved.set(VE::SX16); // procedure linkage table register + Reserved.set(VE::SX17); // linkage-area register + + // Also reserve the register pair aliases covering the above + // registers, with the same conditions. This is required since + // LiveIntervals treat a register as a non reserved register if any + // of its aliases are not reserved. + Reserved.set(VE::Q4); // SX8_SX9 + Reserved.set(VE::Q5); // SX10_SX11 + Reserved.set(VE::Q6); // SX12_SX13 + Reserved.set(VE::Q7); // SX14_SX15 + Reserved.set(VE::Q8); // SX16_SX17 + + // Also reserve the integer 32 bit registers convering the above registers. + Reserved.set(VE::SW8); + Reserved.set(VE::SW9); + Reserved.set(VE::SW10); + Reserved.set(VE::SW11); + Reserved.set(VE::SW12); + Reserved.set(VE::SW13); + Reserved.set(VE::SW14); + Reserved.set(VE::SW15); + Reserved.set(VE::SW16); + Reserved.set(VE::SW17); + + // Also reserve the floating point 32 bit registers convering the above + // registers. + Reserved.set(VE::SF8); + Reserved.set(VE::SF9); + Reserved.set(VE::SF10); + Reserved.set(VE::SF11); + Reserved.set(VE::SF12); + Reserved.set(VE::SF13); + Reserved.set(VE::SF14); + Reserved.set(VE::SF15); + Reserved.set(VE::SF16); + Reserved.set(VE::SF17); + + // Also reserve the integer 16 bit registers convering the above registers. + Reserved.set(VE::SH8); + Reserved.set(VE::SH9); + Reserved.set(VE::SH10); + Reserved.set(VE::SH11); + Reserved.set(VE::SH12); + Reserved.set(VE::SH13); + Reserved.set(VE::SH14); + Reserved.set(VE::SH15); + Reserved.set(VE::SH16); + Reserved.set(VE::SH17); + + // Also reserve the integer 8 bit registers convering the above registers. 
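+ // (SB/SH/SW/SF are the 8-, 16- and 32-bit integer and 32-bit float views of
+ // the same physical registers, so every narrower alias of a reserved SX
+ // register is reserved as well.)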
+ Reserved.set(VE::SB8); + Reserved.set(VE::SB9); + Reserved.set(VE::SB10); + Reserved.set(VE::SB11); + Reserved.set(VE::SB12); + Reserved.set(VE::SB13); + Reserved.set(VE::SB14); + Reserved.set(VE::SB15); + Reserved.set(VE::SB16); + Reserved.set(VE::SB17); + + // VL register is reserved + // Reserved.set(VE::VL); + + // Other Misc registers are reserved + Reserved.set(VE::UCC); + Reserved.set(VE::PSW); + Reserved.set(VE::SAR); + Reserved.set(VE::PMMR); + Reserved.set(VE::PMCR0); + Reserved.set(VE::PMCR1); + Reserved.set(VE::PMCR2); + Reserved.set(VE::PMCR3); + Reserved.set(VE::PMC0); + Reserved.set(VE::PMC1); + Reserved.set(VE::PMC2); + Reserved.set(VE::PMC3); + Reserved.set(VE::PMC4); + Reserved.set(VE::PMC5); + Reserved.set(VE::PMC6); + Reserved.set(VE::PMC7); + Reserved.set(VE::PMC8); + Reserved.set(VE::PMC9); + Reserved.set(VE::PMC10); + Reserved.set(VE::PMC11); + Reserved.set(VE::PMC12); + Reserved.set(VE::PMC13); + Reserved.set(VE::PMC14); + + // reserve constant registers + Reserved.set(VE::VM0); + Reserved.set(VE::VMP0); + + // sx18-sx33 are callee-saved registers + // sx34-sx63 are temporary registers + + return Reserved; +} + +bool VERegisterInfo::isConstantPhysReg(unsigned PhysReg) const { + switch (PhysReg) { + case VE::VM0: + case VE::VMP0: + return true; + default: + return false; + } +} + +const TargetRegisterClass * +VERegisterInfo::getPointerRegClass(const MachineFunction &MF, + unsigned Kind) const { + return &VE::I64RegClass; +} + +#define DEBUG_TYPE "ve" + +static void replaceFI(MachineFunction &MF, MachineBasicBlock::iterator II, + MachineInstr &MI, const DebugLoc &dl, + unsigned FIOperandNum, int Offset, unsigned FramePtr) { + if (1) { + LLVM_DEBUG(dbgs() << "replaceFI: "; MI.dump()); + } + + // Replace frame index with a temporal register if the instruction is + // vector load/store. + if (MI.getOpcode() == VE::LDVRri || MI.getOpcode() == VE::STVRri) { + // Original MI is: + // STVRri frame-index, offset, reg, vl (, memory operand) + // or + // LDVRri reg, frame-index, offset, vl (, memory operand) + // Convert it to: + // LEA tmp-reg, frame-reg, offset + // vst_vIsl reg, 8, tmp-reg, vl (ignored) + // or + // vld_vIsl reg, 8, tmp-reg, vl (ignored) + int opc = MI.getOpcode() == VE::LDVRri ? VE::vld_vIsl : VE::vst_vIsl; + int regi = MI.getOpcode() == VE::LDVRri ? 0 : 2; + const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); + unsigned Reg = MI.getOperand(regi).getReg(); + bool isDef = MI.getOperand(regi).isDef(); + bool isKill = MI.getOperand(regi).isKill(); + + // Prepare for VL + unsigned VLReg; + if (MI.getOperand(3).isImm()) { + int64_t val = MI.getOperand(3).getImm(); + // TODO: if 'val' is already assigned to a register, then use it + VLReg = MF.getRegInfo().createVirtualRegister(&VE::I32RegClass); + BuildMI(*MI.getParent(), II, dl, TII.get(VE::LEA32zzi), VLReg).addImm(val); + } else { + VLReg = MI.getOperand(3).getReg(); + } + + unsigned Tmp1 = MF.getRegInfo().createVirtualRegister(&VE::I64RegClass); + BuildMI(*MI.getParent(), II, dl, TII.get(VE::LEAasx), Tmp1) + .addReg(FramePtr).addImm(Offset); + + MI.setDesc(TII.get(opc)); + MI.getOperand(0).ChangeToRegister(Reg, isDef, false, isKill); + MI.getOperand(1).ChangeToImmediate(8); + MI.getOperand(2).ChangeToRegister(Tmp1, false, false, true); + MI.getOperand(3).ChangeToRegister(VLReg, false, false, true); + return; + } + + // Otherwise, replace frame index with a frame pointer reference directly. + // VE has 32 bit offset field, so no need to expand a target instruction. + // Directly encode it. 
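+ // For example (a rough sketch): a scalar spill of the form
+ //   STSri frame-index, 16, reg
+ // is rewritten in place to
+ //   STSri frame-reg, frame-offset+16, reg
+ // where frame-reg is the frame pointer returned by getFrameRegister().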
+ MI.getOperand(FIOperandNum).ChangeToRegister(FramePtr, false); + MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); +} + +void VERegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, unsigned FIOperandNum, + RegScavenger *RS) const { + assert(SPAdj == 0 && "Unexpected"); + + MachineInstr &MI = *II; + DebugLoc dl = MI.getDebugLoc(); + int FrameIndex = MI.getOperand(FIOperandNum).getIndex(); + MachineFunction &MF = *MI.getParent()->getParent(); + const VESubtarget &Subtarget = MF.getSubtarget(); + const VEFrameLowering *TFI = getFrameLowering(MF); + + unsigned FrameReg; + int Offset; + Offset = TFI->getFrameIndexReference(MF, FrameIndex, FrameReg); + + Offset += MI.getOperand(FIOperandNum + 1).getImm(); + + if (MI.getOpcode() == VE::STQri) { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + unsigned SrcReg = MI.getOperand(2).getReg(); + unsigned SrcHiReg = getSubReg(SrcReg, VE::sub_even); + unsigned SrcLoReg = getSubReg(SrcReg, VE::sub_odd); + // VE stores HiReg to 8(addr) and LoReg to 0(addr) + MachineInstr *StMI = BuildMI(*MI.getParent(), II, dl, TII.get(VE::STSri)) + .addReg(FrameReg) + .addImm(0) + .addReg(SrcLoReg); + replaceFI(MF, II, *StMI, dl, 0, Offset, FrameReg); + MI.setDesc(TII.get(VE::STSri)); + MI.getOperand(2).setReg(SrcHiReg); + Offset += 8; + } else if (MI.getOpcode() == VE::LDQri) { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + unsigned DestReg = MI.getOperand(0).getReg(); + unsigned DestHiReg = getSubReg(DestReg, VE::sub_even); + unsigned DestLoReg = getSubReg(DestReg, VE::sub_odd); + // VE loads HiReg from 8(addr) and LoReg from 0(addr) + MachineInstr *StMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::LDSri), DestLoReg) + .addReg(FrameReg) + .addImm(0); + replaceFI(MF, II, *StMI, dl, 1, Offset, FrameReg); + MI.setDesc(TII.get(VE::LDSri)); + MI.getOperand(0).setReg(DestHiReg); + Offset += 8; + } else if (MI.getOpcode() == VE::STVRri) { + // fall-through + } else if (MI.getOpcode() == VE::LDVRri) { + // fall-through + } else if (MI.getOpcode() == VE::STVMri) { + // Original MI is: + // STVMri frame-index, offset, reg (, memory operand) + // Convert it to: + // SVMi tmp-reg, reg, 0 + // STSri frame-reg, offset, tmp-reg + // SVMi tmp-reg, reg, 1 + // STSri frame-reg, offset+8, tmp-reg + // SVMi tmp-reg, reg, 2 + // STSri frame-reg, offset+16, tmp-reg + // SVMi tmp-reg, reg, 3 + // STSri frame-reg, offset+24, tmp-reg + + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + unsigned SrcReg = MI.getOperand(2).getReg(); + bool isKill = MI.getOperand(2).isKill(); + // FIXME: it would be better to scavenge a register here instead of + // reserving SX16 all of the time. 
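+ // Each __vm256 mask is 256 bits wide, so it is moved through the scalar
+ // temporary in four 64-bit chunks stored at offsets 0, 8, 16 and 24 of the
+ // spill slot; the loop below emits the first three stores and the rewritten
+ // original instruction covers the last one.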
+ unsigned TmpReg = VE::SX16; + for (int i = 0; i < 3; ++i) { + BuildMI(*MI.getParent(), II, dl, TII.get(VE::svm_smI), TmpReg) + .addReg(SrcReg).addImm(i); + MachineInstr *StMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::STSri)) + .addReg(FrameReg).addImm(0) + .addReg(TmpReg, getKillRegState(true)); + replaceFI(MF, II, *StMI, dl, 0, Offset, FrameReg); + Offset += 8; + } + BuildMI(*MI.getParent(), II, dl, TII.get(VE::svm_smI), TmpReg) + .addReg(SrcReg, getKillRegState(isKill)).addImm(3); + MI.setDesc(TII.get(VE::STSri)); + MI.getOperand(2).ChangeToRegister(TmpReg, false, false, true); + } else if (MI.getOpcode() == VE::LDVMri) { + // Original MI is: + // LDVMri reg, frame-index, offset (, memory operand) + // Convert it to: + // LDSri tmp-reg, frame-reg, offset + // LVMi reg, reg, 0, tmp-reg + // LDSri tmp-reg, frame-reg, offset+8 + // LVMi reg, reg, 1, tmp-reg + // LDSri tmp-reg, frame-reg, offset+16 + // LVMi reg, reg, 2, tmp-reg + // LDSri tmp-reg, frame-reg, offset+24 + // LVMi reg, reg, 3, tmp-reg + + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + unsigned DestReg = MI.getOperand(0).getReg(); + // FIXME: it would be better to scavenge a register here instead of + // reserving SX16 all of the time. + unsigned TmpReg = VE::SX16; + BuildMI(*MI.getParent(), II, dl, TII.get(VE::IMPLICIT_DEF), DestReg); + for (int i = 0; i < 3; ++i) { + MachineInstr *StMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::LDSri), TmpReg) + .addReg(FrameReg).addImm(0); + replaceFI(MF, II, *StMI, dl, 1, Offset, FrameReg); + BuildMI(*MI.getParent(), II, dl, TII.get(VE::lvm_mmIs), DestReg) + .addReg(DestReg).addImm(i).addReg(TmpReg, getKillRegState(true)); + Offset += 8; + } + MI.setDesc(TII.get(VE::LDSri)); + MI.getOperand(0).ChangeToRegister(TmpReg, true); + BuildMI(*MI.getParent(), std::next(II), dl, TII.get(VE::lvm_mmIs), DestReg) + .addReg(DestReg).addImm(3).addReg(TmpReg, getKillRegState(true)); + } else if (MI.getOpcode() == VE::STVM512ri) { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + unsigned SrcReg = MI.getOperand(2).getReg(); + unsigned SrcLoReg = getSubReg(SrcReg, VE::sub_vm_odd); + unsigned SrcHiReg = getSubReg(SrcReg, VE::sub_vm_even); + bool isKill = MI.getOperand(2).isKill(); + // FIXME: it would be better to scavenge a register here instead of + // reserving SX16 all of the time. + unsigned TmpReg = VE::SX16; + // store low part of VMP + MachineInstr *LastMI = nullptr; + for (int i = 0; i < 4; ++i) { + LastMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::svm_smI), TmpReg) + .addReg(SrcLoReg).addImm(i); + MachineInstr *StMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::STSri)) + .addReg(FrameReg).addImm(0).addReg(TmpReg, getKillRegState(true)); + replaceFI(MF, II, *StMI, dl, 0, Offset, FrameReg); + Offset += 8; + } + if (isKill) + LastMI->addRegisterKilled(SrcLoReg, this); + // store high part of VMP + for (int i = 0; i < 3; ++i) { + BuildMI(*MI.getParent(), II, dl, TII.get(VE::svm_smI), TmpReg) + .addReg(SrcHiReg).addImm(i); + MachineInstr *StMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::STSri)) + .addReg(FrameReg).addImm(0).addReg(TmpReg, getKillRegState(true)); + replaceFI(MF, II, *StMI, dl, 0, Offset, FrameReg); + Offset += 8; + } + LastMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::svm_smI), TmpReg) + .addReg(SrcHiReg).addImm(3); + if (isKill) { + LastMI->addRegisterKilled(SrcHiReg, this); + // Add implicit super-register kills to the particular MI. 
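+ // (SrcReg is the 512-bit VMP register covering SrcLoReg and SrcHiReg, so it
+ // must be marked killed as a whole.)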
+ LastMI->addRegisterKilled(SrcReg, this); + } + MI.setDesc(TII.get(VE::STSri)); + MI.getOperand(2).ChangeToRegister(TmpReg, false, false, true); + } else if (MI.getOpcode() == VE::LDVM512ri) { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + unsigned DestReg = MI.getOperand(0).getReg(); + unsigned DestLoReg = getSubReg(DestReg, VE::sub_vm_odd); + unsigned DestHiReg = getSubReg(DestReg, VE::sub_vm_even); + // FIXME: it would be better to scavenge a register here instead of + // reserving SX16 all of the time. + unsigned TmpReg = VE::SX16; + BuildMI(*MI.getParent(), II, dl, TII.get(VE::IMPLICIT_DEF), DestReg); + for (int i = 0; i < 4; ++i) { + MachineInstr *StMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::LDSri), TmpReg) + .addReg(FrameReg).addImm(0); + replaceFI(MF, II, *StMI, dl, 1, Offset, FrameReg); + BuildMI(*MI.getParent(), II, dl, TII.get(VE::lvm_mmIs), DestLoReg) + .addReg(DestLoReg).addImm(i).addReg(TmpReg, getKillRegState(true)); + Offset += 8; + } + for (int i = 0; i < 3; ++i) { + MachineInstr *StMI = + BuildMI(*MI.getParent(), II, dl, TII.get(VE::LDSri), TmpReg) + .addReg(FrameReg).addImm(0); + replaceFI(MF, II, *StMI, dl, 1, Offset, FrameReg); + BuildMI(*MI.getParent(), II, dl, TII.get(VE::lvm_mmIs), DestHiReg) + .addReg(DestHiReg).addImm(i).addReg(TmpReg, getKillRegState(true)); + Offset += 8; + } + MI.setDesc(TII.get(VE::LDSri)); + MI.getOperand(0).ChangeToRegister(TmpReg, true); + BuildMI(*MI.getParent(), std::next(II), dl, TII.get(VE::lvm_mmIs), DestHiReg) + .addReg(DestHiReg).addImm(3).addReg(TmpReg, getKillRegState(true)); + } + + replaceFI(MF, II, MI, dl, FIOperandNum, Offset, FrameReg); +} + +unsigned VERegisterInfo::getRegPressureSetLimit(const MachineFunction &MF, + unsigned Idx) const { + return VEGenRegisterInfo::getRegPressureSetLimit(MF, Idx); +} + +Register VERegisterInfo::getFrameRegister(const MachineFunction &MF) const { + return VE::SX9; +} + +// VE has no architectural need for stack realignment support, +// except that LLVM unfortunately currently implements overaligned +// stack objects by depending upon stack realignment support. +// If that ever changes, this can probably be deleted. +bool VERegisterInfo::canRealignStack(const MachineFunction &MF) const { + if (!TargetRegisterInfo::canRealignStack(MF)) + return false; + + // VE always has a fixed frame pointer register, so don't need to + // worry about needing to reserve it. [even if we don't have a frame + // pointer for our frame, it still cannot be used for other things, + // or register window traps will be SADNESS.] + + // If there's a reserved call frame, we can use VE to access locals. + if (getFrameLowering(MF)->hasReservedCallFrame(MF)) + return true; + + // Otherwise, we'd need a base pointer, but those aren't implemented + // for VE at the moment. + + return false; +} diff --git a/llvm/lib/Target/VE/VERegisterInfo.td b/llvm/lib/Target/VE/VERegisterInfo.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VERegisterInfo.td @@ -0,0 +1,178 @@ +//===-- VERegisterInfo.td - VE Register defs ---------------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Declarations that describe the VE register file +//===----------------------------------------------------------------------===// + +class VEReg Enc, string n> : Register { + let HWEncoding{15-7} = 0; + let HWEncoding{6-0} = Enc; + let Namespace = "VE"; +} + +class VEMiscReg Enc, string n>: Register { + let HWEncoding{15-5} = 0; + let HWEncoding{4-0} = Enc; + let Namespace = "VE"; +} + +let Namespace = "VE" in { + def sub_i8 : SubRegIndex<8, 56>; // Low 8 bit (56..63) + def sub_i16 : SubRegIndex<16, 48>; // Low 16 bit (48..63) + def sub_i32 : SubRegIndex<32, 32>; // Low 32 bit (32..63) + def sub_f32 : SubRegIndex<32>; // High 32 bit (0..31) + def sub_even : SubRegIndex<64>; // High 64 bit (0..63) + def sub_odd : SubRegIndex<64, 64>; // Low 64 bit (64..127) + def sub_vm_even : SubRegIndex<256>; // High 256 bit (0..255) + def sub_vm_odd : SubRegIndex<256, 256>; // Low 256 bit (256..511) +} + +// Registers are identified with 7-bit ID numbers. +// R - 64-bit integer or floating-point registers +class R Enc, string n, list subregs = [], + list aliases = []>: VEReg { + let SubRegs = subregs; + let Aliases = aliases; +} + +// Rq - Slots in the register file for 128-bit floating-point values. +class Rq Enc, string n, list subregs> : VEReg { + let SubRegs = subregs; + let SubRegIndices = [sub_even, sub_odd]; + let CoveredBySubRegs = 1; +} + +// Vector Registers are identified with 7-bit ID numbers. +// VR - 64-bit wide 256 elements integer or floating-point registers +class VR Enc, string n> : VEReg; + +// Vector Mask Registers are identified with 5-bit ID numbers. +// VM - 256-bit wide mask registers +class VM Enc, string n> : VEReg; + +// Pseudo 512b mask register using two 256b physical mask register. +// Two 256b mask register has to be sequential and start from even register +// such as (VM2, VM3). Even register is used for upper(0:31) bit in packed +// vector instructions, and odd register is used for lower(32:63). +class VMp Enc, string n, list subregs> : VEReg { + let SubRegs = subregs; + let SubRegIndices = [sub_vm_even, sub_vm_odd]; +} + +// Miscellaneous Registers +def UCC : VEMiscReg<0, "UCC">; // User clock counter +def PSW : VEMiscReg<1, "PSW">; // Program status word +def SAR : VEMiscReg<2, "SAR">; // Store address register +def PMMR : VEMiscReg<7, "PMMR">; // Performance monitor mode register + +// Performance monitor configuration registers +foreach I = 0-3 in + def PMCR#I : VEMiscReg; + +// Performance monitor counter +foreach I = 0-14 in + def PMC#I : VEMiscReg; + +// Generic integer registers - 8 bits wide +foreach I = 0-63 in + def SB#I : R, DwarfRegNum<[I]>; + +// Generic integer registers - 16 bits wide +let SubRegIndices = [sub_i8] in +foreach I = 0-63 in + def SH#I : R("SB"#I)]>, DwarfRegNum<[I]>; + +// Generic integer registers - 32 bits wide +let SubRegIndices = [sub_i16] in +foreach I = 0-63 in + def SW#I : R("SH"#I)]>, DwarfRegNum<[I]>; + +// Generic floating point registers - 32 bits wide +// NOTE: Mark SF#I as alias of SW#I temporary to avoid register allocation +// problem. 
+foreach I = 0-63 in + def SF#I : R("SW"#I)]>, DwarfRegNum<[I]>; + +// Generic integer registers - 64 bits wide +let SubRegIndices = [sub_i32, sub_f32], CoveredBySubRegs = 1 in +foreach I = 0-63 in + def SX#I : R("SW"#I), !cast("SF"#I)]>, + DwarfRegNum<[I]>; + +// Vector registers - 64 bits wide 256 elements +foreach I = 0-63 in + def V#I : VR, DwarfRegNum<[!add(64,I)]>; + +// Vector mask registers - 256 bits wide +foreach I = 0-15 in + def VM#I : VM, DwarfRegNum<[!add(128,I)]>; + +// Aliases of VMs to use as a pair of two VM for packed instructions +def VMP0 : VMp<0, "VM0", [VM0, VM1]>; +def VMP1 : VMp<2, "VM2", [VM2, VM3]>; +def VMP2 : VMp<4, "VM4", [VM4, VM5]>; +def VMP3 : VMp<6, "VM6", [VM6, VM7]>; +def VMP4 : VMp<8, "VM8", [VM8, VM9]>; +def VMP5 : VMp<10, "VM10", [VM10, VM11]>; +def VMP6 : VMp<12, "VM12", [VM12, VM13]>; +def VMP7 : VMp<14, "VM14", [VM14, VM15]>; + +// Aliases of the S* registers used to hold 128-bit for values (long doubles). +// Following foreach represents something like: +// def Q0 : Rq<0, "S0", [S0, S1]>; +// def Q1 : Rq<2, "S2", [S2, S3]>; +// ... +foreach I = 0-31 in + def Q#I : Rq("SX"#!shl(I,1)), + !cast("SX"#!add(!shl(I,1),1))]>; + +// Register classes. +// +// The register order is defined in terms of the preferred +// allocation order. +def I8 : RegisterClass<"VE", [i8], 8, + (add (sequence "SB%u", 34, 63), + (sequence "SB%u", 0, 33))>; +def I16 : RegisterClass<"VE", [i16], 16, + (add (sequence "SH%u", 34, 63), + (sequence "SH%u", 0, 33))>; +def I32 : RegisterClass<"VE", [i32], 32, + (add (sequence "SW%u", 34, 63), + (sequence "SW%u", 0, 33))>; +def I64 : RegisterClass<"VE", [i64, f64], 64, + (add (sequence "SX%u", 34, 63), + (sequence "SX%u", 0, 33))>; +def F32 : RegisterClass<"VE", [f32], 32, + (add (sequence "SF%u", 34, 63), + (sequence "SF%u", 0, 33))>; +def F128 : RegisterClass<"VE", [f128], 128, + (add (sequence "Q%u", 17, 31), + (sequence "Q%u", 0, 16))>; + +def Misc : RegisterClass<"VE", [i64], 64, + (add UCC, PSW, SAR, PMMR, + (sequence "PMCR%u", 0, 3), + (sequence "PMC%u", 0, 14))>; + +def V64 : RegisterClass<"VE", + [v512i32, v512f32, + v256i64, v256i32, v256f32, v256f64, + v128i64, v128i32, v128f32, v128f64, + v64i64, v64i32, v64f32, v64f64, + v32i64, v32i32, v32f32, v32f64, + v16i64, v16i32, v16f32, v16f64, + v8i64, v8i32, v8f32, v8f64, + v4i64, v4i32, v4f32, v4f64, + v2i64, v2i32, v2f32, v2f64], + 64, (sequence "V%u", 0, 63)>; +// vm0 is reserved for always true + +def VM : RegisterClass<"VE", [v256i1, v4i64], 64, (sequence "VM%u", 0, 15)>; +def VM512 : RegisterClass<"VE", [v512i1, v8i64], 64, (sequence "VMP%u", 0, 7)>; diff --git a/llvm/lib/Target/VE/VESchedule.td b/llvm/lib/Target/VE/VESchedule.td new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VESchedule.td @@ -0,0 +1,27 @@ +//===-- VESchedule.td - Describe the VE Itineries ----------*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +/* +def ALU_FU : FuncUnit; +def LDST_FU : FuncUnit; + +def VE_ALU : InstrItinClass; +def VE_LD : InstrItinClass; +def VE_ST : InstrItinClass; + +def VEItinerary : ProcessorItineraries<[ALU_FU, LDST_FU],[],[ + // InstrItinData]>, + // InstrItinData]>, + // InstrItinData]>, + // InstrItinData]>, + InstrItinData]> +]>; +*/ diff --git a/llvm/lib/Target/VE/VESubtarget.h b/llvm/lib/Target/VE/VESubtarget.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VESubtarget.h @@ -0,0 +1,76 @@ +//===-- VESubtarget.h - Define Subtarget for the VE -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the VE specific subclass of TargetSubtargetInfo. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VESUBTARGET_H +#define LLVM_LIB_TARGET_VE_VESUBTARGET_H + +#include "VEFrameLowering.h" +#include "VEISelLowering.h" +#include "VEInstrInfo.h" +#include "llvm/CodeGen/SelectionDAGTargetInfo.h" +#include "llvm/CodeGen/TargetFrameLowering.h" +#include "llvm/CodeGen/TargetSubtargetInfo.h" +#include "llvm/IR/DataLayout.h" +#include + +#define GET_SUBTARGETINFO_HEADER +#include "VEGenSubtargetInfo.inc" + +namespace llvm { +class StringRef; + +class VESubtarget : public VEGenSubtargetInfo { + Triple TargetTriple; + virtual void anchor(); + bool Vectorize; + + VEInstrInfo InstrInfo; + VETargetLowering TLInfo; + SelectionDAGTargetInfo TSInfo; + VEFrameLowering FrameLowering; + +public: + VESubtarget(const Triple &TT, const std::string &CPU, const std::string &FS, + const TargetMachine &TM); + + const VEInstrInfo *getInstrInfo() const override { return &InstrInfo; } + const TargetFrameLowering *getFrameLowering() const override { + return &FrameLowering; + } + const VERegisterInfo *getRegisterInfo() const override { + return &InstrInfo.getRegisterInfo(); + } + const VETargetLowering *getTargetLowering() const override { return &TLInfo; } + const SelectionDAGTargetInfo *getSelectionDAGInfo() const override { + return &TSInfo; + } + + bool enableMachineScheduler() const override; + + bool vectorize() const { return Vectorize; } + + /// ParseSubtargetFeatures - Parses features string setting specified + /// subtarget options. Definition of function is auto generated by tblgen. + void ParseSubtargetFeatures(StringRef CPU, StringRef FS); + VESubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); + + /// Given a actual stack size as determined by FrameInfo, this function + /// returns adjusted framesize which includes space for register window + /// spills and arguments. 
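+ /// A rough worked example of the arithmetic in VESubtarget.cpp: a request
+ /// for 100 bytes of locals becomes 100 + 176 = 276 bytes, rounded up to 288
+ /// to preserve the 16-byte stack alignment.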
+ int getAdjustedFrameSize(int stackSize) const; + + bool isTargetLinux() const { return TargetTriple.isOSLinux(); } +}; + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/VESubtarget.cpp b/llvm/lib/Target/VE/VESubtarget.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VESubtarget.cpp @@ -0,0 +1,99 @@ +//===-- VESubtarget.cpp - VE Subtarget Information ------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the VE specific subclass of TargetSubtargetInfo. +// +//===----------------------------------------------------------------------===// + +#include "VESubtarget.h" +#include "VE.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define DEBUG_TYPE "sparc-subtarget" + +#define GET_SUBTARGETINFO_TARGET_DESC +#define GET_SUBTARGETINFO_CTOR +#include "VEGenSubtargetInfo.inc" + +void VESubtarget::anchor() {} + +VESubtarget &VESubtarget::initializeSubtargetDependencies(StringRef CPU, + StringRef FS) { + // Determine default and user specified characteristics + std::string CPUName = CPU; + if (CPUName.empty()) + CPUName = "ve"; + + // Parse features string. + ParseSubtargetFeatures(CPUName, FS); + + return *this; +} + +VESubtarget::VESubtarget(const Triple &TT, const std::string &CPU, + const std::string &FS, const TargetMachine &TM) + : VEGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT), Vectorize(false), + InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this), + FrameLowering(*this) {} + +int VESubtarget::getAdjustedFrameSize(int frameSize) const { + + // VE stack frame: + // + // +----------------------------------------+ + // | Locals and temporaries | + // +----------------------------------------+ + // | Parameter area for callee | + // 176(fp) | | + // +----------------------------------------+ + // | Register save area (RSA) for callee | + // | | + // 16(fp) | 20 * 8 bytes | + // +----------------------------------------+ + // 8(fp) | Return address | + // +----------------------------------------+ + // 0(fp) | Frame pointer of caller | + // --------+----------------------------------------+-------- + // | Locals and temporaries for callee | + // +----------------------------------------+ + // | Parameter area for callee of callee | + // +----------------------------------------+ + // 16(sp) | RSA for callee of callee | + // +----------------------------------------+ + // 8(sp) | Return address | + // +----------------------------------------+ + // 0(sp) | Frame pointer of callee | + // +----------------------------------------+ + + // RSA frame: + // +----------------------------------------------+ + // 168(fp) | %s33 | + // +----------------------------------------------+ + // | %s19...%s32 | + // +----------------------------------------------+ + // 48(fp) | %s18 | + // +----------------------------------------------+ + // 40(fp) | Linkage area register (%s17) | + // +----------------------------------------------+ + // 32(fp) | Procedure linkage table register (%plt=%s16) | + // +----------------------------------------------+ + // 24(fp) | Global offset table register (%got=%s15) | + // +----------------------------------------------+ + // 16(fp) | Thread pointer register (%tp=%s14) | + 
// +----------------------------------------------+ + + frameSize += 176; // for RSA, RA, and FP + frameSize = alignTo(frameSize, 16); // requires 16 bytes alignment + + return frameSize; +} + +bool VESubtarget::enableMachineScheduler() const { return true; } diff --git a/llvm/lib/Target/VE/VETargetMachine.h b/llvm/lib/Target/VE/VETargetMachine.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VETargetMachine.h @@ -0,0 +1,57 @@ +//===-- VETargetMachine.h - Define TargetMachine for VE ---------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the VE specific subclass of TargetMachine. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VETARGETMACHINE_H +#define LLVM_LIB_TARGET_VE_VETARGETMACHINE_H + +#include "VEInstrInfo.h" +#include "VESubtarget.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +class VETargetMachine : public LLVMTargetMachine { + std::unique_ptr TLOF; + VESubtarget Subtarget; + // Hold Strings that can be free'd all together with VETargetMachine + // e.g.: "GCC_except_tableXX" string. + std::list StrList; + +public: + VETargetMachine(const Target &T, const Triple &TT, StringRef CPU, + StringRef FS, const TargetOptions &Options, + Optional RM, Optional CM, + CodeGenOpt::Level OL, bool JIT); + ~VETargetMachine() override; + + const VESubtarget *getSubtargetImpl() const { return &Subtarget; } + const VESubtarget *getSubtargetImpl(const Function &) const override { + return &Subtarget; + } + std::list *getStrList() const { + return const_cast *>(&StrList); + } + + // Pass Pipeline Configuration + TargetPassConfig *createPassConfig(PassManagerBase &PM) override; + TargetLoweringObjectFile *getObjFileLowering() const override { + return TLOF.get(); + } + + bool isMachineVerifierClean() const override { return false; } + + TargetTransformInfo getTargetTransformInfo(const Function &F) override; +}; + +} // namespace llvm + +#endif diff --git a/llvm/lib/Target/VE/VETargetMachine.cpp b/llvm/lib/Target/VE/VETargetMachine.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VETargetMachine.cpp @@ -0,0 +1,135 @@ +//===-- VETargetMachine.cpp - Define TargetMachine for VE -----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +#include "VETargetMachine.h" +#include "VE.h" +// #include "VETargetObjectFile.h" +#include "VETargetTransformInfo.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/CodeGen/TargetPassConfig.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define DEBUG_TYPE "ve" + +extern "C" void LLVMInitializeVETarget() { + // Register the target. 
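+ // Registering with the TargetRegistry is what lets front ends and tools
+ // such as llc look this backend up by its target triple.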
+ RegisterTargetMachine X(getTheVETarget()); +} + +static std::string computeDataLayout(const Triple &T) { + // Aurora VE is little endian + std::string Ret = "e"; + + // Use ELF mangling + Ret += "-m:e"; + + // Alignments for 64 bit integers. + Ret += "-i64:64"; + + // VE supports 32 bit and 64 bits integer on registers + Ret += "-n32:64"; + + // Stack alignment is 64 bits + Ret += "-S64"; + + // Vector alignments are 64 bits + // Need to define all of them. Otherwise, each alignment becomes + // the size of each data by default. + Ret += "-v64:64:64"; // for v2f32 + Ret += "-v128:64:64"; + Ret += "-v256:64:64"; + Ret += "-v512:64:64"; + Ret += "-v1024:64:64"; + Ret += "-v2048:64:64"; + Ret += "-v4096:64:64"; + Ret += "-v8192:64:64"; + Ret += "-v16384:64:64"; // for v256f64 + + return Ret; +} + +static Reloc::Model getEffectiveRelocModel(Optional RM) { + if (!RM.hasValue()) + return Reloc::Static; + return *RM; +} + +class VEELFTargetObjectFile : public TargetLoweringObjectFileELF { + void Initialize(MCContext &Ctx, const TargetMachine &TM) override { + TargetLoweringObjectFileELF::Initialize(Ctx, TM); + InitializeELF(TM.Options.UseInitArray); + } +}; + +static std::unique_ptr createTLOF() { + return std::make_unique(); +} + +/// Create an Aurora VE architecture model +VETargetMachine::VETargetMachine(const Target &T, const Triple &TT, + StringRef CPU, StringRef FS, + const TargetOptions &Options, + Optional RM, + Optional CM, + CodeGenOpt::Level OL, bool JIT) + : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, + getEffectiveRelocModel(RM), + getEffectiveCodeModel(CM, CodeModel::Small), OL), + // TLOF(make_unique()), + TLOF(createTLOF()), Subtarget(TT, CPU, FS, *this) { + initAsmInfo(); +} + +VETargetMachine::~VETargetMachine() {} + +TargetTransformInfo VETargetMachine::getTargetTransformInfo(const Function &F) { + return TargetTransformInfo(VETTIImpl(this, F)); +} + +namespace { +/// VE Code Generator Pass Configuration Options. +class VEPassConfig : public TargetPassConfig { +public: + VEPassConfig(VETargetMachine &TM, PassManagerBase &PM) + : TargetPassConfig(TM, PM) {} + + VETargetMachine &getVETargetMachine() const { + return getTM(); + } + + void addIRPasses() override; + bool addInstSelector() override; + void addPreEmitPass() override; +}; +} // namespace + +TargetPassConfig *VETargetMachine::createPassConfig(PassManagerBase &PM) { + return new VEPassConfig(*this, PM); +} + +void VEPassConfig::addIRPasses() { + addPass(createAtomicExpandPass()); + TargetPassConfig::addIRPasses(); +} + +bool VEPassConfig::addInstSelector() { + addPass(createVEISelDag(getVETargetMachine())); + return false; +} + +void VEPassConfig::addPreEmitPass() { + // LVLGen should be called after scheduling and register allocation + addPass(createLVLGenPass()); +} diff --git a/llvm/lib/Target/VE/VETargetTransformInfo.h b/llvm/lib/Target/VE/VETargetTransformInfo.h new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/VE/VETargetTransformInfo.h @@ -0,0 +1,59 @@ +//===- VETargetTransformInfo.h - VE specific TTI ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file a TargetTransformInfo::Concept conforming object specific to the +/// VE target machine. 
It uses the target's detailed information to +/// provide more precise answers to certain TTI queries, while letting the +/// target independent and default TTI implementations handle the rest. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_VE_VETARGETTRANSFORMINFO_H +#define LLVM_LIB_TARGET_VE_VETARGETTRANSFORMINFO_H + +#include "VE.h" +#include "VETargetMachine.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/CodeGen/BasicTTIImpl.h" + +namespace llvm { + +class VETTIImpl : public BasicTTIImplBase { + using BaseT = BasicTTIImplBase; + friend BaseT; + + const VESubtarget *ST; + const VETargetLowering *TLI; + + const VESubtarget *getST() const { return ST; } + const VETargetLowering *getTLI() const { return TLI; } + +public: + explicit VETTIImpl(const VETargetMachine *TM, const Function &F) + : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)), + TLI(ST->getTargetLowering()) {} + + unsigned getNumberOfRegisters(unsigned ClassID) const { return 64; } + + unsigned getRegisterBitWidth(bool Vector) const { + if (Vector) { + return 256 * 64; + } + return 64; + } + + unsigned getMinVectorRegisterBitWidth() const { return 256 * 64; } + + bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) { return false; } + bool isLegalMaskedGather(Type *DataType) { return false; }; + bool isLegalMaskedScatter(Type *DataType) { return false; }; +}; + +} // namespace llvm + +#endif // LLVM_LIB_TARGET_VE_VETARGETTRANSFORMINFO_H diff --git a/llvm/lib/Target/VE/gen-veintrin.sh b/llvm/lib/Target/VE/gen-veintrin.sh new file mode 100755 --- /dev/null +++ b/llvm/lib/Target/VE/gen-veintrin.sh @@ -0,0 +1,24 @@ +#! /bin/sh + +#set -v + +F=lib/Target/VE/veintrin.py + +if test ! -f $F; then + echo "Error. Run in llvm directory" + exit +fi + + +python $F --inst > lib/Target/VE/VEInstrVecVL.gen.td +python $F -p > lib/Target/VE/VEInstrIntrinsicVL.gen.td +python $F -i > include/llvm/IR/IntrinsicsVEVL.gen.td +python $F -b > ../clang/include/clang/Basic/BuiltinsVEVL.gen.def +python $F --veintrin > ../clang/lib/Headers/velintrin_gen.h +python $F --vl-index > lib/Target/VE/vl-index.inc + +#python $F --html > velintrin.html + +#python $F -r > ../../llvm-ve-intrinsic-test/gen/ref.cc +#python $F --decl > ../../llvm-ve-intrinsic-test/decl.h +#python $F -t diff --git a/llvm/lib/Target/VE/veintrin.py b/llvm/lib/Target/VE/veintrin.py new file mode 100755 --- /dev/null +++ b/llvm/lib/Target/VE/veintrin.py @@ -0,0 +1,1444 @@ +#! /usr/bin/python + +import re +import sys +from functools import partial + +class Type: + def __init__(self, ValueType, builtinCode, intrinDefType, ctype, elemType = None): + self.ValueType = ValueType # v256f64, f64, f32, i64, ... + self.builtinCode = builtinCode # V256d, d, f, ... + self.intrinDefType = intrinDefType # LLVMType, ... 
+ self.ctype = ctype + self.elemType = elemType + + def isVectorType(self): + return self.elemType != None + + def stride(self): + if self.isVectorType(): + if self.elemType in [T_f64, T_i64, T_u64]: + return 8 + else: + return 4 + raise Exception("not a vector type") + +T_f64 = Type("f64", "d", "LLVMType", "double") +T_f32 = Type("f32", "f", "LLVMType", "float") +T_i64 = Type("i64", "Li", "LLVMType", "long int") +T_i32 = Type("i32", "i", "LLVMType", "int", "I32") +T_u64 = Type("i64", "LUi", "LLVMType", "unsigned long int") +T_u32 = Type("i32", "Ui", "LLVMType", "unsigned int") +T_voidp = Type("i64", "v*", "llvm_ptr_ty", "void*") +T_voidcp = Type("i64", "vC*", "llvm_ptr_ty", "void const*") + +T_v256f64 = Type("v256f64", "V256d", "LLVMType", "double*", T_f64) +T_v256f32 = Type("v256f64", "V256d", "LLVMType", "float*", T_f32) +T_v256i64 = Type("v256f64", "V256d", "LLVMType", "long int*", T_i64) +T_v256i32 = Type("v256f64", "V256d", "LLVMType", "int*", T_i32) +T_v256u64 = Type("v256f64", "V256d", "LLVMType", "unsigned long int*", T_u64) +T_v256u32 = Type("v256f64", "V256d", "LLVMType", "unsigned int*", T_u32) + +T_v4u64 = Type("v4i64", "V4ULi", "LLVMType", "unsigned int*", T_u64) # for VM +T_v8u64 = Type("v8i64", "V8ULi", "LLVMType", "unsigned int*", T_u64) # for VM512 + +#T_v8u32 = Type("v8i32", "V8ULi", "unsigned int*", T_u32) +#T_v16u32 = Type("v16i32", "V16ULi", "unsigned int*", T_u32) + +class Op(object): + def __init__(self, kind, ty, name, regClass): + self.kind = kind + self.ty_ = ty + self.name_ = name + self.regClass_ = regClass + + def regClass(self): return self.regClass_ + def intrinDefType(self): return self.ty_.intrinDefType + def ValueType(self): return self.ty_.ValueType + def builtinCode(self): return self.ty_.builtinCode + def elemType(self): return self.ty_.elemType + def ctype(self): return self.ty_.ctype + def stride(self): return self.ty_.stride() + + def dagOp(self): + if self.kind == 'I' or self.kind == 'Z': + return "({} {}:${})".format(self.ty_.ValueType, self.immType, self.name_) + else: + return "{}:${}".format(self.ty_.ValueType, self.name_) + + def isImm(self): return self.kind == 'I' or self.kind == 'N' or self.kind == "Z" + def isReg(self): return self.kind == 'v' or self.kind == 's' + def isSReg(self): return self.kind == 's' or self.kind == 'f' + def isVReg(self): return self.kind == 'v' + def isMask(self): return self.kind == 'm' or self.kind == 'M' + def isMask256(self): return self.kind == 'm' + def isMask512(self): return self.kind == 'M' + def isVL(self): return self.kind == 'l' + + def regName(self): + return self.name_ + + def formalName(self): + if self.isVReg() or self.isMask(): + return "p" + self.name_ + else: + return self.name_ + + def VectorType(self): + if self.isVReg(): + return "__vr" + elif self.isMask512(): + return "__vm512" + elif self.isMask(): + return "__vm256" + raise Exception("not a vector type: {}".format(self.kind)) + +def VOp(ty, name): + if ty == T_f64: return Op("v", T_v256f64, name, "V64") + elif ty == T_f32: return Op("v", T_v256f32, name, "V64") + elif ty == T_i64: return Op("v", T_v256i64, name, "V64") + elif ty == T_i32: return Op("v", T_v256i32, name, "V64") + elif ty == T_u64: return Op("v", T_v256u64, name, "V64") + elif ty == T_u32: return Op("v", T_v256u32, name, "V64") + else: raise Exception("unknown type") + +def SOp(ty, name): + if ty in [T_f64, T_i64, T_u64, T_voidp, T_voidcp]: + return Op("s", ty, name, "I64") + elif ty == T_f32: return Op("s", ty, name, "F32") + elif ty == T_i32: return Op("s", ty, name, 
"I32") + elif ty == T_u32: return Op("s", ty, name, "I32") + else: raise Exception("unknown type: {}".format(ty.ValueType)) + +def SX(ty): return SOp(ty, "sx") +def SY(ty): return SOp(ty, "sy") +def SZ(ty): return SOp(ty, "sz") +def SW(ty): return SOp(ty, "sw") + +def VX(ty): return VOp(ty, "vx") +def VY(ty): return VOp(ty, "vy") +def VZ(ty): return VOp(ty, "vz") +def VW(ty): return VOp(ty, "vw") +def VD(ty): return VOp(ty, "vd") # pass through + +VL = Op("l", T_u32, "vl", "I32") +VM = Op("m", T_v4u64, "vm", "VM") +VMX = Op("m", T_v4u64, "vmx", "VM") +VMY = Op("m", T_v4u64, "vmy", "VM") +VMZ = Op("m", T_v4u64, "vmz", "VM") +VMD = Op("m", T_v4u64, "vmd", "VM") # pass through +VM512 = Op("M", T_v8u64, "vm", "VM512") +VMX512 = Op("M", T_v8u64, "vmx", "VM512") +VMY512 = Op("M", T_v8u64, "vmy", "VM512") +VMZ512 = Op("M", T_v8u64, "vmz", "VM512") +VMD512 = Op("M", T_v8u64, "vmd", "VM512") # pass through + +class ImmOp(Op): + def __init__(self, kind, ty, name, immType): + regClass = {T_u32:"simm7Op32", T_i32:"simm7Op32", + T_u64:"simm7Op64", T_i64:"simm7Op64"}[ty] + super(ImmOp, self).__init__(kind, ty, name, regClass) + self.immType = immType + +def ImmI(ty): return ImmOp("I", ty, "I", "simm7") # kind, type, varname +def ImmN(ty): return ImmOp("I", ty, "N", "uimm6") +def UImm7(ty): return ImmOp("I", ty, "N", "uimm7") +def ImmZ(ty): return ImmOp("Z", ty, "Z", "simm7") # FIXME: simm7? + +def Args_vvv(ty): return [VX(ty), VY(ty), VZ(ty)] +def Args_vsv(tyV, tyS = None): + if tyS == None: + tyS = tyV + return [VX(tyV), SY(tyS), VZ(tyV)] +def Args_vIv(ty): return [VX(ty), ImmI(ty), VZ(ty)] + +# inst: instruction in the manual. VFAD +# opc: op code (8bit) +# asm: vfadd.$df, vfmk.$df.$cf, vst.$nc.$ot +# llvmInst: Instruction in VEInstrVec.td. VFADdv +# intrinsicName: function name without prefix +# => _ve_{intrinsicName}, __builtin_ve_{intrinsicName}, int_ve_{intrinsicName} + +# subcode: cx, cx2, ... (4bit) +# subname: d, s, nc, ot, ... 
+ +class Inst(object): + def __init__(self, opc, inst, asm, intrinsicName, outs, ins, **kwargs): + self.kwargs = kwargs + self.opc = opc + self.outs = outs + self.ins = ins + + self.inst_ = inst + #self.subop_ = kwargs['subop'] if 'subop' in kwargs else None + self.llvmInst_ = kwargs['llvmInst'] + self.asm_ = asm + self.intrinsicName_ = intrinsicName + self.funcPrefix_ = "_ve_" + self.llvmIntrinsicPrefix_ = "_ve_" + + self.hasTest_ = True + self.prop_ = ["IntrNoMem"] + self.hasBuiltin_ = True + self.hasPat_ = True + self.hasLLVMInstDefine_ = True + self.hasIntrinsicDef_ = True + self.notYetImplemented_ = False + self.Obsolete_ = False + + def inst(self): return self.inst_ + def llvmInst(self): return self.llvmInst_ + def intrinsicName(self): return self.intrinsicName_ + def asm(self): return self.asm_ if self.asm_ else "" + def expr(self): return None if 'expr' not in self.kwargs else self.kwargs['expr'] + def funcName(self): + return "{}{}".format(self.funcPrefix_, self.intrinsicName()) + def builtinName(self): + return "__builtin{}{}".format(self.llvmIntrinsicPrefix_, self.intrinsicName()) + def llvmIntrinName(self): + return "int{}{}".format(self.llvmIntrinsicPrefix_, self.intrinsicName()) + def isNotYetImplemented(self): return self.notYetImplemented_ + def NYI(self, flag = True): + self.notYetImplemented_ = flag + if flag: + self.hasTest_ = False + return self + def Obsolete(self): self.Obsolete_ = True + def isObsolete(self): return self.Obsolete_ + + # difference among dummy and pseudo + # dummy: instructions to insert a entry into the manual + # pseudo: instructions without opcode, ie: no machine instruction + + # predicates + def isDummy(self): return False + def isMasked(self): return any([op.regName() == "vm" for op in self.ins]) + def isPacked(self): return ('packed' in self.kwargs) and self.kwargs['packed'] + #def isPseudo(self): return self.opc == None + + def noLLVMInstDefine(self): self.hasLLVMInstDefine_ = False + def hasLLVMInstDefine(self): + return self.hasLLVMInstDefine_ and (not self.isDummy()) + + def hasIntrinsicDef(self): return self.hasIntrinsicDef_ + + def noLLVM(self): + self.hasLLVMInstDefine_ = False + self.hasPat_ = False + self.hasIntrinsicDef_ = False + self.hasBuiltin_ = False + return self + + def hasPassThroughOp(self): return any([op.regName() == "vd" for op in self.ins]) + def hasPassThroughMaskOp(self): return any([op.regName() == "vmd" for op in self.ins]) + def hasImmOp(self): return any([op.isImm() for op in self.ins]) + def hasVLOp(self): return any([op.isVL() for op in self.ins]) + + def noBuiltin(self): self.hasBuiltin_ = False + def hasBuiltin(self): return self.hasBuiltin_ + + def hasMask(self): + if len(self.outs) > 0 and self.outs[0].isMask(): + return True + return any([op.isMask() for op in self.ins]) + + def readMem(self): + self.prop_ = ["IntrReadMem"] + return self + + def writeMem(self): + self.prop_ = ["IntrWriteMem"] + return self + + def inaccessibleMemOrArgMemOnly(self): + self.prop_ = ["IntrInaccessibleMemOrArgMemOnly"] + return self + + def hasSideEffects(self): + self.prop_ = ["IntrHasSideEffects"] + + def prop(self): + return self.prop_ + + def hasInst(self): return self.inst_ != None + + def instDefine(self): + print("// inst={} asm={} intrisic={}".format(self.inst(), self.asm(), self.intrinsicName())) + + def fmtOps(ops): + return ", ".join(["{}:${}".format(op.regClass(), op.regName()) for op in ops]) + + outs = fmtOps(self.outs) + ins = fmtOps(self.ins) + tmp = [op for op in self.ins if op.regName() not in ["vd", "vl", 
"vmd"]] + asmArgs = ",".join(["${}".format(op.regName()) for op in self.outs + tmp]) + + instName = self.llvmInst() + + if self.opc: + s = "def {} : RV<0x{:x}, (outs {}), (ins {}),\n".format(instName, self.opc, outs, ins) + s += ' "{} {}",'.format(self.asm(), asmArgs) # asmstr + s += " [], NoItinerary>\n" # pattern + else: + s = "def {} : Pseudo<(outs {}), (ins {}),\n".format(instName, outs, ins) + s += ' "# {} {}",'.format(self.asm(), asmArgs) # asmstr + s += " []>\n" # pattern + s += "{\n" +# if self.opc: +# if len(self.ins) > 2 and self.ins[1].kind == "s": +# s += ' let cs = 1;\n' +# if self.isPacked(): +# s += ' let cx = 1;\n' +# s += ' let cx2 = 1;\n' +# if self.isMasked(): +# s += ' bits<4> vm;\n' +# s += ' let m = vm;\n' + if self.hasPassThroughOp(): + s += ' let Constraints = "${} = $vd";\n'.format(self.outs[0].regName()) + if self.hasPassThroughMaskOp(): + s += ' let Constraints = "${} = $vmd";\n'.format(self.outs[0].regName()) + s += ' let DecoderNamespace = "VEL";\n' + s += ' let isCodeGenOnly = 1;\n' + if self.hasVLOp(): + s += ' let DisableEncoding = "$vl";\n' + s += "}\n" + return s + + # to be included from IntrinsicsVE.td + def intrinsicDefine(self): + outs = ", ".join(["{}".format(op.intrinDefType()) for op in self.outs]) + ins = ", ".join(["{}".format(op.intrinDefType()) for op in self.ins]) + + prop = ', '.join(self.prop()) + + intrinName = "{}".format(self.llvmIntrinName()) + builtinName = "GCCBuiltin<\"{}\"".format(self.builtinName()) + + return "let TargetPrefix = \"ve\" in def {} : {}>, Intrinsic<[{}], [{}], [{}]>;".format(intrinName, builtinName, outs, ins, prop) + + # to be included from BuiltinsVE.def + def builtin(self): + if len(self.outs) == 0: + tmp = "v" + else: + tmp = "".join([i.builtinCode() for i in self.outs]) + tmp += "".join([i.builtinCode() for i in self.ins]) + return "BUILTIN({}, \"{}\", \"n\")".format(self.builtinName(), tmp) + + # to be included from veintrin.h + def veintrin(self): + return "#define {} {}".format(self.funcName(), self.builtinName()) + + def noTest(self): + self.hasTest_ = False + return self + + def hasTest(self): + return self.hasTest_ + + def stride(self, op): + return 8 if self.isPacked() else op.stride() + + def hasExpr(self): return self.expr() != None + + def noPat(self): self.hasPat_ = False + def hasPat(self): return self.hasPat_ + +class DummyInst(Inst): + def __init__(self, opc, inst, func, asm, **kwargs): + kwargs['llvmInst'] = None + super(DummyInst, self).__init__(opc, inst, asm, func, None, None, **kwargs) + self.func_ = func + def func(self): return self.func_ + def isDummy(self): return True + +class InstVEL(Inst): + def __init__(self, opc, inst, asm, intrinsicName, outs, ins, **kwargs): + #sys.stderr.write("inst={} subop={} asm={}\n".format(inst, kwargs['subop'], asm)) + if 'llvmInst' not in kwargs: + if asm: + suffix = "".join([op.kind for op in outs + ins]) + llvmInst = re.sub("\.", "", asm) + "_" + suffix + else: + llvmInst = None + kwargs['llvmInst'] = llvmInst + + super(InstVEL, self).__init__(opc, inst, asm, intrinsicName, outs, ins, **kwargs) + + self.funcPrefix_ = "_vel_" + self.llvmIntrinsicPrefix_ = "_ve_vl_" # we have to start from "_ve_" in LLVM + + def pattern(self): + args = ", ".join([op.dagOp() for op in self.ins]) + l = "({} {})".format(self.llvmIntrinName(), args) + r = "({} {})".format(self.llvmInst(), args) + return "def : Pat<{}, {}>;".format(l, r) + + +class TestFunc: + def __init__(self, header, definition, ref): + self.header_ = header + self.definition_ = definition + self.ref_ = ref + + 
def header(self): + return self.header_ + + def definition(self): + return self.definition_ + + def reference(self): + return self.ref_ + + def decl(self): + return "extern {};".format(self.header_) + +class TestGeneratorMask: + def gen(self, I): + header = "void {}(unsigned long int* px, unsigned long int const* py, unsigned long int* pz, int n)".format(I.intrinsicName()) + + args = ", ".join([op.regName() for op in I.ins]) + + is512 = I.outs[0].isMask512() + + if (is512): + vm = "__vm512" + m = "M" + l = 8 + else: + vm = "__vm256" + m = "m" + l = 4 + + lvm = "" + svm = "" + for i in range(l): + lvm += " vmy = _vel_lvm_{m}{m}ss(vmy, {i}, py[{i}]);\n".format(m=m, i=i) + lvm += " vmz = _vel_lvm_{m}{m}ss(vmz, {i}, pz[{i}]);\n".format(m=m, i=i) + svm += " px[{i}] = _vel_svm_s{m}s(vmx, {i});\n".format(m=m, i=i) + + func = '''#include "velintrin.h" +{header} +{{ + {vm} vmx, vmy, vmz; +{lvm} + int vl = 256; + vmx = _vel_{inst}({args}); + +{svm} +}} +'''.format(header=header, inst=I.intrinsicName(), args=args, vm=vm, lvm=lvm, svm=svm) + + if I.hasExpr(): + args = ["px[i]", "py[i]", "pz[i]"] + #line = I.expr.format(*[op.regName() for op in I.outs + I.ins]) + line = I.expr().format(*args) + ref = '''{header} +{{ + for (int i = 0; i < {l}; ++i) + {line}; +}} +'''.format(header=header, line=line, l=l) + else: + ref = None + + return TestFunc(header, func, ref); + +class TestGenerator: + def funcHeader(self, I): + tmp = [i for i in (I.outs + I.ins) if (not i.isImm()) and (not i.isVL())] + args = ["{} {}".format(i.ctype(), i.formalName()) for i in tmp] + + name = I.intrinsicName() + if I.hasImmOp(): + name = name + "_imm" + + return "void {name}({args}, int n)".format(name=name, args=", ".join(args)) + + def get_vld_vst_inst(self, I, op): + vld = "vld_vssl" + vst = "vst_vssl" + if not I.isPacked(): + if op.elemType() == T_f32: + vld = "vldu_vssl" + vst = "vstu_vssl" + elif op.elemType() == T_i32 or op.elemType() == T_u32: + vld = "vldlsx_vssl" + vst = "vstl_vssl" + return [vld, vst] + + def test_(self, I): + head = self.funcHeader(I) + + out = I.outs[0] + body = "" + indent = " " * 8 + + #print(I.instName) + + if I.isPacked(): + step = 512 + body += indent + "int l = n - i < 512 ? (n - i) / 2UL : 256;\n" + else: + step = 256 + body += indent + "int l = n - i < 256 ? 
n - i : 256;\n" + + ins = I.ins + if I.hasMask() and I.ins[-1].isVReg(): # remove vd when vm, vd + ins = I.ins[0:-1] + + # input + args = [] + for op in ins: + if op.isVReg(): + stride = I.stride(op) + vld, vst = self.get_vld_vst_inst(I, op) + body += indent + "__vr {} = _vel_{}({}, p{}, l);\n".format(op.regName(), vld, stride, op.regName()) + if op.isMask512(): + # FIXME + stride = I.stride(op) + #vld, vst = self.get_vld_vst_inst(I, op) + body += indent + "__vr {}0 = _vel_vld_vssl({}, p{}, l);\n".format(op.regName(), stride, op.regName()) + body += indent + "__vm512 {} = _vel_pvfmkwgt_Mvl({}0, l);\n".format(op.regName(), op.regName()) + elif op.isMask(): + stride = I.stride(op) + #vld, vst = self.get_vld_vst_inst(I, op) + body += indent + "__vr {}0 = _vel_vldlzx_vssl(4, p{}, l);\n".format(op.regName(), op.regName(), stride) + body += indent + "__vm256 {} = _vel_vfmkwgt_mvl({}0, l);\n".format(op.regName(), op.regName()) + if op.isReg() or op.isMask(): + args.append(op.regName()) + elif op.isImm(): + args.append("3") + + if I.hasMask(): + op = I.outs[0] + vld, vst = self.get_vld_vst_inst(I, op) + stride = I.stride(op) + body += indent + "__vr {} = _vel_{}({}, p{}, l);\n".format(op.regName(), vld, stride, op.regName()) + body += indent + "{} = _vel_{}({}, l);\n".format(out.regName(), I.intrinsicName(), ', '.join(args)) + else: + body += indent + "__vr {} = _vel_{}({}, l);\n".format(out.regName(), I.intrinsicName(), ', '.join(args)) + + if out.isVReg(): + stride = I.stride(out) + vld, vst = self.get_vld_vst_inst(I, out) + body += indent + "_vel_{}({}, {}, {}, l);\n".format(vst, out.regName(), stride, out.formalName()) + + tmp = [] + for op in (I.outs + ins): + if op.isVReg() or op.isMask(): + tmp.append(indent + "p{} += {};".format(op.regName(), "512" if I.isPacked() else "256")) + + body += "\n".join(tmp) + + func = '''#include "velintrin.h" +{} {{ + for (int i = 0; i < n; i += {}) {{ +{} + }} +}} +''' + return func.format(head, step, body) + + def reference(self, I): + if not I.hasExpr(): + return None + + head = self.funcHeader(I) + + tmp = [] + for op in I.outs + I.ins: + if op.isVReg(): + tmp.append("p{}[i]".format(op.regName())) + elif op.isVL(): + pass + elif op.isReg(): + tmp.append(op.regName()) + elif op.isImm(): + tmp.append("3") + + body = I.expr().format(*tmp) + ";" + + preprocess = '' + for op in I.ins: + if op.isSReg(): + if I.isPacked(): + ctype = I.outs[0].elemType().ctype + preprocess = '{} sy0 = *({}*)&sy;'.format(ctype, ctype) + body = re.sub('sy', "sy0", body) + + if I.hasMask(): + body = "if (pvm[i] > 0) {{ {} }}".format(body) + + func = '''{} +{{ + {} + for (int i = 0; i < n; ++i) {{ + {} + }} +}}''' + + return func.format(head, preprocess, body); + + def gen(self, I): + return TestFunc(self.funcHeader(I), self.test_(I), self.reference(I)); + +def getTestGenerator(I): + if len(I.outs) > 0 and I.outs[0].isMask(): + return TestGeneratorMask() + return TestGenerator() + +class ManualInstPrinter: + def __init__(self): + pass + + def printAll(self, insts): + for i in insts: + self.printI(i) + + def make(self, I): + v = [] + + outType = "void" + if len(I.outs) > 0: + out = I.outs[0] + if out.isVReg(): + outType = "__vr" + v.append("{}[:]".format(out.regName())) + elif out.isMask512(): + outType = "__vm512" + v.append("{}[:]".format(out.regName())) + elif out.isMask(): + outType = "__vm256" + v.append("{}[:]".format(out.regName())) + elif out.isSReg(): + outType = out.ctype() + else: + raise Exception("unknown output operand type: {}".format(out.kind)) + 
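# Build the C parameter list (ins) and the operand display names (v) used to render the pseudo-code description. +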
#v.append(out.regName()) + + ins = [] + for op in I.ins: + if op.isVReg(): + ins.append("__vr " + op.regName()) + v.append("{}[:]".format(op.regName())) + elif op.isSReg(): + ins.append("{} {}".format(op.ctype(), op.regName())) + v.append("{}".format(op.regName())) + elif op.isMask512(): + ins.append("__vm512 {}".format(op.regName())) + v.append("{}[:]".format(op.regName())) + elif op.isMask(): + ins.append("__vm256 {}".format(op.regName())) + v.append("{}[:]".format(op.regName())) + elif op.isImm(): + ins.append("{} {}".format(op.ctype(), op.regName())) + v.append("{}".format(op.regName())) + elif op.isVL(): + ins.append("int vl".format(op.ctype())) + else: + raise Exception("unknown register kind: {}".format(op.kind)) + + func = "{} {}({})".format(outType, I.funcName(), ", ".join(ins)) + + #if outType: + # func = "{} _ve_{}({})".format(outType, intrinsicName, ", ".join(ins)) + #else: + # func = "_ve_{}({})".format(intrinsicName, ", ".join(ins)) + + if I.hasExpr(): + if I.hasMask(): + expr = I.expr().format(*v) + expr = re.sub(r'.*= ', '', expr) + expr = "{} = {} ? {} : {}".format(v[0], v[-2], expr, v[-1]) + else: + expr = I.expr().format(*v) + else: + expr = "" + return [func, expr] + + def printI(self, I): + if not I.hasExpr(): + return + + func, expr = self.make(I) + line = " {:<80} // {}".format(func, expr) + print(line) + +class HtmlManualPrinter(ManualInstPrinter): + def printAll(self, T, opt_no_link): + idx = 0 + for s in T.sections: + print("<a href=\"#sec{}\">{}</a><br>".format(idx, s.name)) + idx += 1 + idx = 0 + for s in T.sections: + rowspan = {} + tmp = [] + for I in s.instsWithDummy(): + if I.isDummy(): + func = I.func() + expr = "" + else: + func, expr = self.make(I) + inst = I.inst() if I.hasInst() else "" + inst = re.sub(r'i64|i32|f64|f32', '', inst) + #print("inst={}".format(inst)) + if inst in rowspan: + rowspan[inst] += 1 + else: + rowspan[inst] = 1 + asm = I.asm() if I.opc else "" + if not opt_no_link: + asm = "<a href=\"#page{}\">{}</a>".format(s.page, asm) + if I.isNotYetImplemented(): + func = '<font color="gray">' + func + '</font>[1]' + if I.isObsolete(): + func = '<font color="gray">' + func + '</font>[2]' + #tmp.append([inst, func, I.asm(), expr]) + tmp.append([inst, func, asm, expr]) + + print("<h3><a name=\"sec{}\">{}</a></h3>".format(idx, s.name)) + print("<table border=1>") + print("<tr><th>Instruction</th><th>Function</th><th>asm</th><th>Description</th></tr>") + row = 0 + for a in tmp: + inst = a.pop(0) + print("<tr>") + if row == 0: + row = rowspan[inst] + print("<td rowspan={}>{}</td>".format(row, inst)) + row -= 1 + print("<td>{}</td><td>{}</td><td>{}</td></tr>".format(*a)) + print("</table>") + idx += 1 + + print('<p>[1] Not yet implemented.</p>') + print('<p>[2] Obsolete.</p>
') + +class InstList: + def __init__(self, clazz): + self.a = [] + self.clazz = clazz + def add(self, I): + self.a.append(I) + return self + def __iter__(self): + return self.a.__iter__() + def __getattr__(self, attrname): + def _method_missing(self, name, *args): + for i in self.a: + getattr(i, name)(*args) + return self + return partial(_method_missing, self, attrname) + +class Section: + def __init__(self, name, page): + self.name = name + self.page = page + self.a = [] + def add(self, i): + self.a.append(i) + def insts(self): + return [i for i in self.a if not i.isDummy()] + def instsWithDummy(self): + return self.a + +class InstTable(object): + def __init__(self, InstClass): + self.currentSection = [] + self.sections = [] + self.InstClass = InstClass + + def Section(self, name, page): + s = Section(name, page) + self.sections.append(s) + self.currentSection = s + + def insts(self): + a = [] + for s in self.sections: + a.extend(s.insts()) + return a + + def add(self, inst): + self.currentSection.add(inst) + return inst + + def Dummy(self, opc, inst, func, asm): + return self.add(DummyInst(opc, inst, func, asm)) + + def NoImpl(self, inst): + self.add(DummyInst(None, inst, "not yet implemented", "").NYI(True)) + + # intrinsic name is generated from asm and arguments + def Def(self, opc, inst, subop, asm, ary, expr = None, **kwargs): + baseIntrinName = kwargs['baseIntrinName'] if 'baseIntrinName' in kwargs else re.sub(r'\.', '', asm) + IL = InstList(self.InstClass) + for args in ary: + func_suffix = "_" + "".join([op.kind for op in args if op]) + intrinsicName = baseIntrinName + func_suffix + intrinsicName = re.sub(r'[INZ]', 's', intrinsicName) # replace Imm to s + outs = [args[0]] if args[0] else [] + ins = args[1:] + kwargs['packed'] = 'p' in subop + kwargs['expr'] = expr + kwargs['subop'] = subop + i = self.InstClass(opc, inst, asm, intrinsicName, outs, ins, **kwargs) + self.add(i) + IL.add(i) + return IL + + def DefM(self, opc, baseInstName, subop, asm, OL, expr = None, **kwargs): + vm = VM512 if 'p' in subop else VM + OL = self.addMask(OL, vm) + return self.Def(opc, baseInstName, subop, asm, OL, expr, **kwargs) + + def addMask(self, ary, MaskOp = VM, addVD = True): + tmp = [] + for a in ary: + if addVD: + tmp.append(a + [MaskOp, VD(a[0].elemType())]) + else: + tmp.append(a + [MaskOp]) + return ary + tmp + + def VLDm(self, opc, inst, subop, asm): + O = [] + O.append([VX(T_u64), SY(T_u64), SZ(T_voidcp)]) + O.append([VX(T_u64), ImmI(T_u64), SZ(T_voidcp)]) + #O.append([VX(T_u64), SY(T_u64), ImmZ(T_voidcp)]) + #O.append([VX(T_u64), ImmI(T_u64), ImmZ(T_voidcp)]) + + self.Def(opc, inst, subop, asm, O).noTest().readMem() + self.Def(opc, inst, subop+"nc", asm+".nc", O).noTest().readMem() + + def VSTm(self, opc, inst, asm): + O_rr = [None, VX(T_u64), SY(T_u64), SZ(T_voidp)] + O_ir = [None, VX(T_u64), ImmI(T_u64), SZ(T_voidp)] + O = self.addMask([O_rr, O_ir], addVD=False) + self.Def(opc, inst, "", asm, O).noTest().writeMem() + self.Def(opc, inst, "nc", asm+".nc", O).noTest().writeMem() + self.Def(opc, inst, "ot", asm+".ot", O).noTest().writeMem() + self.Def(opc, inst, "ncot", asm+".nc.ot", O).noTest().writeMem() + + def VBRDm(self, opc): + expr = "{0} = {1}" + self.DefM(0x8C, "VBRD", "", "vbrd", [[VX(T_f64), SY(T_f64)]], expr, baseIntrinName="vbrdd").noLLVMInstDefine() + self.DefM(0x8C, "VBRD", "", "vbrd", [[VX(T_i64), SY(T_i64)]], expr, baseIntrinName="vbrdl") + self.DefM(0x8C, "VBRD", "", "vbrd", [[VX(T_i64), ImmI(T_i64)]], expr, baseIntrinName="vbrdl") + self.DefM(0x8C, "VBRD", "", "vbrdu", 
[[VX(T_f32), SY(T_f32)]], expr, baseIntrinName="vbrds") + self.DefM(0x8C, "VBRD", "", "vbrdl", [[VX(T_i32), SY(T_i32)]], expr, baseIntrinName="vbrdw") + self.DefM(0x8C, "VBRD", "", "vbrdl", [[VX(T_i32), ImmI(T_i32)]], expr, baseIntrinName="vbrdw") + self.DefM(0x8C, "VBRD", "p", "pvbrd", [[VX(T_u32), SY(T_u64)]], expr) + + def VMVm(self): + O_s = [VX(T_u64), SY(T_u32), VZ(T_u64)] + O_i = [VX(T_u64), UImm7(T_u32), VZ(T_u64)] + O = self.addMask([O_s, O_i]) + self.Def(0x9C, "VMV", "", "vmv", O).noTest() + + def LVSm(self, opc): + I = self.InstClass + # Manual LLVMInstDefine + self.add(I(opc, "LVS", "lvs", "lvsl_svs", [SX(T_u64)], [VX(T_u64), SY(T_u32)], llvmInst="lvsl_svs", noVL=True).noTest()).noLLVMInstDefine() + self.add(I(opc, "LVS", "lvs", "lvsd_svs", [SX(T_f64)], [VX(T_u64), SY(T_u32)], llvmInst="lvsl_svs", noVL=True).noTest()).noLLVMInstDefine() + self.add(I(opc, "LVS", "lvs", "lvss_svs", [SX(T_f32)], [VX(T_u64), SY(T_u32)], llvmInst="lvss_svs", noVL=True).noTest()).noLLVMInstDefine() + + def Inst2f(self, opc, name, instName, expr, hasPacked = True, hasNex = False): + self.Def(opc, instName, "d", name+".d", [[VX(T_f64), VY(T_f64)]], expr) + self.Def(opc, instName, "s", name+".s", [[VX(T_f32), VY(T_f32)]], expr) + if hasPacked: + self.Def(opc, instName, "p", "p"+name, [[VX(T_f32), VY(T_f32)]], expr) + if hasNex: + self.Def(opc, instName, "d", name+".d.nex", [[VX(T_f64), VY(T_f64)]], expr) + self.Def(opc, instName, "s", name+".s.nex", [[VX(T_f32), VY(T_f32)]], expr) + if hasPacked: + self.Def(opc, instName, "p", "p"+name+".nex", [[VX(T_f32), VY(T_f32)]], expr) + + def Inst3f(self, opc, name, instName, subop, expr, hasPacked = True): + O_f64 = [Args_vvv(T_f64), Args_vsv(T_f64)] + O_f32 = [Args_vvv(T_f32), Args_vsv(T_f32)] + O_pf32 = [Args_vvv(T_f32), [VX(T_f32), SY(T_u64), VZ(T_f32)]] + + O_f64 = self.addMask(O_f64) + O_f32 = self.addMask(O_f32) + O_pf32 = self.addMask(O_pf32, VM512) + + self.Def(opc, instName, subop+"d", name+".d", O_f64, expr) + self.Def(opc, instName, subop+"s", name+".s", O_f32, expr) + if hasPacked: + self.Def(opc, instName, subop+"p", "p"+name, O_pf32, expr) + + # 3 operands, u64/u32 + def Inst3u(self, opc, name, instName, expr, hasPacked = True): + O_u64 = [Args_vvv(T_u64), Args_vsv(T_u64), Args_vIv(T_u64)] + O_u32 = [Args_vvv(T_u32), Args_vsv(T_u32), Args_vIv(T_u32)] + O_pu32 = [Args_vvv(T_u32), [VX(T_u32), SY(T_u64), VZ(T_u32)]] + + O_u64 = self.addMask(O_u64) + O_u32 = self.addMask(O_u32) + O_pu32 = self.addMask(O_pu32, VM512) + + self.Def(opc, instName, "l", name+".l", O_u64, expr) + self.Def(opc, instName, "w", name+".w", O_u32, expr) + if hasPacked: + self.Def(opc, instName, "p", "p"+name, O_pu32, expr) + + # 3 operands, i64 + def Inst3l(self, opc, name, instName, subop, expr): + O = [Args_vvv(T_i64), Args_vsv(T_i64), Args_vIv(T_i64)] + O = self.addMask(O) + self.Def(opc, instName, subop+"l", name+".l", O, expr) + + # 3 operands, i32 + def Inst3w(self, opc, name, instName, subop, expr, hasPacked = True): + O_i32 = [Args_vvv(T_i32), Args_vsv(T_i32), Args_vIv(T_i32)] + O_pi32 = [Args_vvv(T_i32), [VX(T_i32), SY(T_u64), VZ(T_i32)]] + + O_i32 = self.addMask(O_i32) + O_pi32 = self.addMask(O_pi32, VM512) + + self.Def(opc, instName, subop + "wsx", name+".w.sx", O_i32, expr) + self.Def(opc, instName, subop + "wzx", name+".w.zx", O_i32, expr) + if hasPacked: + self.Def(opc, instName, subop + "p", "p"+name, O_pi32, expr) + + def Inst3divbys(self, opc, name, instName, subop, ty): + O_s = [VX(ty), VY(ty), SY(ty)] + O_i = [VX(ty), VY(ty), ImmI(ty)] + O = [O_s, O_i] + O = 
self.addMask(O) + self.Def(opc, instName, subop, name, O, "{0} = {1} / {2}") + + def Logical(self, opc, name, instName, expr): + O_u32_vsv = [VX(T_u32), SY(T_u64), VZ(T_u32)] + + Args = [Args_vvv(T_u64), Args_vsv(T_u64)] + Args = self.addMask(Args) + + ArgsP = [Args_vvv(T_u32), O_u32_vsv] + ArgsP = self.addMask(ArgsP, VM512) + + self.Def(opc, instName, "", name, Args, expr) + self.Def(opc, instName, "p", "p"+name+".lo", ArgsP, expr) + self.Def(opc, instName, "p", "p"+name+".up", ArgsP, expr) + self.Def(opc, instName, "p", "p"+name, ArgsP, expr) + + def Shift(self, opc, name, instName, ty, expr): + O_vvv = [VX(ty), VZ(ty), VY(T_u64)] + O_vvs = [VX(ty), VZ(ty), SY(T_u64)] + O_vvN = [VX(ty), VZ(ty), ImmN(T_u64)] + + OL = [O_vvv, O_vvs, O_vvN] + OL = self.addMask(OL); + + self.Def(opc, instName, "", name, OL, expr) + + def ShiftPacked(self, opc, name, instName, ty, expr): + O_vvv = [VX(ty), VZ(ty), VY(T_u32)] + O_vvs = [VX(ty), VZ(ty), SY(T_u64)] + + OL = [O_vvv, O_vvs] + OL = self.addMask(OL, VM512) + + self.Def(opc, instName, "p", "p"+name+".lo", OL, expr) + self.Def(opc, instName, "p", "p"+name+".up", OL, expr) + self.Def(opc, instName, "p", "p"+name, OL, expr) + + def Inst4f(self, opc, name, instName, expr): + O_f64_vvvv = [VX(T_f64), VY(T_f64), VZ(T_f64), VW(T_f64)] + O_f64_vsvv = [VX(T_f64), SY(T_f64), VZ(T_f64), VW(T_f64)] + O_f64_vvsv = [VX(T_f64), VY(T_f64), SY(T_f64), VW(T_f64)] + + O_f32_vvvv = [VX(T_f32), VY(T_f32), VZ(T_f32), VW(T_f32)] + O_f32_vsvv = [VX(T_f32), SY(T_f32), VZ(T_f32), VW(T_f32)] + O_f32_vvsv = [VX(T_f32), VY(T_f32), SY(T_f32), VW(T_f32)] + + O_pf32_vsvv = [VX(T_f32), SY(T_u64), VZ(T_f32), VW(T_f32)] + O_pf32_vvsv = [VX(T_f32), VY(T_f32), SY(T_u64), VW(T_f32)] + + O_f64 = [O_f64_vvvv, O_f64_vsvv, O_f64_vvsv] + O_f32 = [O_f32_vvvv, O_f32_vsvv, O_f32_vvsv] + O_pf32 = [O_f32_vvvv, O_pf32_vsvv, O_pf32_vvsv] + + O_f64 = self.addMask(O_f64) + O_f32 = self.addMask(O_f32) + O_pf32 = self.addMask(O_pf32, VM512) + + self.Def(opc, instName, "d", name+".d", O_f64, expr) + self.Def(opc, instName, "s", name+".s", O_f32, expr) + self.Def(opc, instName, "p", "p"+name, O_pf32, expr) + + def FLm(self, opc, inst, subop, asm, args): + self.Def(opc, inst, subop.format(fl="f"), asm.format(fl=".fst"), args) + self.Def(opc, inst, subop.format(fl="l"), asm.format(fl=".lst"), args).noTest() + + def VGTm(self, opc, inst, subop, asm): + O = [] + O.append([VX(T_u64), VY(T_u64), SY(T_u64), SZ(T_u64)]) + O.append([VX(T_u64), VY(T_u64), SY(T_u64), ImmZ(T_u64)]) + O.append([VX(T_u64), VY(T_u64), ImmI(T_u64), SZ(T_u64)]) + O.append([VX(T_u64), VY(T_u64), ImmI(T_u64), ImmZ(T_u64)]) + O = self.addMask(O, VM, False) + self.Def(opc, inst, subop, asm, O).noTest().readMem() + self.Def(opc, inst, subop+"nc", asm+".nc", O).noTest().readMem() + + def VSCm(self, opc, inst0, inst, asm): + O = [] + O.append([None, VX(T_u64), VY(T_u64), SY(T_u64), SZ(T_u64)]) + O.append([None, VX(T_u64), VY(T_u64), SY(T_u64), ImmZ(T_u64)]) + O.append([None, VX(T_u64), VY(T_u64), ImmI(T_u64), SZ(T_u64)]) + O.append([None, VX(T_u64), VY(T_u64), ImmI(T_u64), ImmZ(T_u64)]) + O = self.addMask(O, VM, False) + self.Def(opc, inst0, "", asm, O).noTest().writeMem() + self.Def(opc, inst0, "nc", asm+".nc", O).noTest().writeMem() + self.Def(opc, inst0, "ot", asm+".ot", O).noTest().writeMem() + self.Def(opc, inst0, "ncot", asm+".nc.ot", O).noTest().writeMem() + + def VSUM(self, opc, inst, subop, asm, baseOps): + OL = [] + for op in baseOps: + OL.append(op) + OL.append(op + [VM]) + self.Def(opc, inst, subop, asm, OL, noPassThrough=True) + + 
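# VFIX emits two intrinsics per conversion: the default rounding form (the reference adds 0.5 before the cast) and the ".rz" round-toward-zero form. +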
def VFIX(self, opc, inst, subop, asm, OL, ty): + expr = "{0} = (" + ty + ")({1}+0.5)" + self.DefM(opc, inst, subop, asm, OL, expr) + expr = "{0} = (" + ty + ")({1})" + self.DefM(opc, inst, subop + "rz", asm+".rz", OL, expr) + +class InstTableVEL(InstTable): + def __init__(self): + super(InstTableVEL, self).__init__(InstVEL) + + def Def(self, opc, inst, subop, asm, ary, expr = None, **kwargs): + # append dummyOp(pass through Op) and VL + newary = [] + for args in ary: + outs = [args[0]] + ins = args[1:] + if ('noVL' not in kwargs) or (not kwargs['noVL']): + newary.append(outs + ins + [VL]) + noPassThrough = ('noPassThrough' in kwargs) and (kwargs['noPassThrough']) + hasPassThroughOp = any([op.regName() == "vd" for op in ins]) + if (not noPassThrough) and (not hasPassThroughOp) and (outs[0] and outs[0].kind == "v"): + newary.append(outs + ins + [VD(outs[0].elemType()), VL]) + else: + newary.append(args) + + return super(InstTableVEL, self).Def(opc, inst, subop, asm, newary, expr, **kwargs) + +def createInstructionTable(): + T = InstTableVEL() + + # + # Start of instruction definition + # + + T.Section("Table 3-15 Vector Transfer Instructions", 22) + T.VLDm(0x81, "VLD", "", "vld") + T.VLDm(0x82, "VLDU", "", "vldu") + T.VLDm(0x83, "VLDL", "sx", "vldl.sx") + T.VLDm(0x83, "VLDL", "zx", "vldl.zx") + T.VLDm(0xC1, "VLD2D", "", "vld2d") + T.VLDm(0xC2, "VLDU2D", "", "vldu2d") + T.VLDm(0xC3, "VLDL2D", "sx", "vldl2d.sx") + T.VLDm(0xC3, "VLDL2D", "zx", "vldl2d.zx") + T.VSTm(0x91, "VST", "vst") + T.VSTm(0x92, "VSTU", "vstu") + T.VSTm(0x93, "VSTL", "vstl") + T.VSTm(0xD1, "VST2D", "vst2d") + T.VSTm(0xD2, "VSTU2D", "vstu2d") + T.VSTm(0xD3, "VSTL2D", "vstl2d") + T.Def(0x80, "PFCHV", "", "pfchv", [[None, SY(T_i64), SZ(T_voidcp)]]).noTest().inaccessibleMemOrArgMemOnly() + T.Def(0x80, "PFCHV", "", "pfchv", [[None, ImmI(T_i64), SZ(T_voidcp)]]).noTest().inaccessibleMemOrArgMemOnly() + T.Def(0x80, "PFCHV", "nc", "pfchv.nc", [[None, SY(T_i64), SZ(T_voidcp)]]).noTest().inaccessibleMemOrArgMemOnly() + T.Def(0x80, "PFCHV", "nc", "pfchv.nc", [[None, ImmI(T_i64), SZ(T_voidcp)]]).noTest().inaccessibleMemOrArgMemOnly() + T.Def(0x8E, "LSV", "", "lsv", [[VX(T_u64), VD(T_u64), SY(T_u32), SZ(T_u64)]], noVL=True).noTest().noLLVMInstDefine() + T.LVSm(0x9E) + T.Def(0xB7, "LVM", "r", "lvm", [[VMX, VMD, SY(T_u64), SZ(T_u64)]], noVL=True).noTest().NYI() + T.Def(0xB7, "LVM", "i", "lvm", [[VMX, VMD, ImmN(T_u64), SZ(T_u64)]], noVL=True).noTest() + T.Def(None, "LVM", "pr", "lvm", [[VMX512, VMD512, SY(T_u64), SZ(T_u64)]], noVL=True).noTest().NYI() + T.Def(None, "LVM", "pi", "lvm", [[VMX512, VMD512, ImmN(T_u64), SZ(T_u64)]], noVL=True).noTest() + T.Def(0xA7, "SVM", "r", "svm", [[SX(T_u64), VMZ, SY(T_u64)]], noVL=True).noTest().NYI() + T.Def(0xA7, "SVM", "i", "svm", [[SX(T_u64), VMZ, ImmN(T_u64)]], noVL=True).noTest() + T.Def(None, "SVM", "pr", "svm", [[SX(T_u64), VMZ512, SY(T_u64)]], noVL=True).noTest().NYI() + T.Def(None, "SVM", "pi", "svm", [[SX(T_u64), VMZ512, ImmN(T_u64)]], noVL=True).noTest() + T.VBRDm(0x8C) + T.VMVm() + + O_VMPD = [[VX(T_i64), VY(T_i32), VZ(T_i32)], + [VX(T_i64), SY(T_i32), VZ(T_i32)], + [VX(T_i64), ImmI(T_i32), VZ(T_i32)]] + + T.Section("Table 3-16. 
Vector Fixed-Point Arithmetic Operation Instructions", 23) + T.Inst3u(0xC8, "vaddu", "VADD", "{0} = {1} + {2}") # u32, u64 + T.Inst3w(0xCA, "vadds", "VADS", "", "{0} = {1} + {2}") # i32 + T.Inst3l(0x8B, "vadds", "VADX", "", "{0} = {1} + {2}") # i64 + T.Inst3u(0xC8, "vsubu", "VSUB", "{0} = {1} - {2}") # u32, u64 + T.Inst3w(0xCA, "vsubs", "VSBS", "", "{0} = {1} - {2}") # i32 + T.Inst3l(0x8B, "vsubs", "VSBX", "", "{0} = {1} - {2}") # i64 + T.Inst3u(0xC9, "vmulu", "VMPY", "{0} = {1} * {2}", False) + T.Inst3w(0xCB, "vmuls", "VMPS", "", "{0} = {1} * {2}", False) + T.Inst3l(0xDB, "vmuls", "VMPX", "", "{0} = {1} * {2}") + T.Def(0xD9, "VMPD", "", "vmuls.l.w", O_VMPD, "{0} = {1} * {2}") + T.Inst3u(0xE9, "vdivu", "VDIV", "{0} = {1} / {2}", False) + T.Inst3divbys(0xE9, "vdivu.l", "VDIV", "l", T_u64) + T.Inst3divbys(0xE9, "vdivu.w", "VDIV", "w", T_u32) + T.Inst3w(0xEB, "vdivs", "VDVS", "", "{0} = {1} / {2}", False) + T.Inst3divbys(0xEB, "vdivs.w.sx", "VDVS", "wsx", T_i32) + T.Inst3divbys(0xEB, "vdivs.w.zx", "VDVS", "wzx", T_i32) + T.Inst3l(0xFB, "vdivs", "VDVX", "", "{0} = {1} / {2}") + T.Inst3divbys(0xEB, "vdivs.l", "VDVX", "l", T_i64) + T.Inst3u(0xB9, "vcmpu", "VCMP", "{0} = compare({1}, {2})") + T.Inst3w(0xFA, "vcmps", "VCPS", "", "{0} = compare({1}, {2})") + T.Inst3l(0xBA, "vcmps", "VCPX", "", "{0} = compare({1}, {2})") + T.Inst3w(0x8A, "vmaxs", "VCMS", "a", "{0} = max({1}, {2})") + T.Inst3w(0x8A, "vmins", "VCMS", "i", "{0} = min({1}, {2})") + T.Inst3l(0x9A, "vmaxs", "VCMX", "a", "{0} = max({1}, {2})") + T.Inst3l(0x9A, "vmins", "VCMX", "i", "{0} = min({1}, {2})") + + T.Section("Table 3-17 Vector Logical Arithmetic Operation Instructions", 25) + T.Logical(0xC4, "vand", "VAND", "{0} = {1} & {2}") + T.Logical(0xC5, "vor", "VOR", "{0} = {1} | {2}") + T.Logical(0xC6, "vxor", "VXOR", "{0} = {1} ^ {2}") + T.Logical(0xC7, "veqv", "VEQV", "{0} = ~({1} ^ {2})") + T.NoImpl("VLDZ") + T.NoImpl("VPCNT") + T.NoImpl("VBRV") + T.Def(0x99, "VSEQ", "", "vseq", [[VX(T_u64)]], "{0} = i").noTest() + T.Def(0x99, "VSEQ", "l", "pvseq.lo", [[VX(T_u64)]], "{0} = i").noTest() + T.Def(0x99, "VSEQ", "u", "pvseq.up", [[VX(T_u64)]], "{0} = i").noTest() + T.Def(0x99, "VSEQ", "p", "pvseq", [[VX(T_u64)]], "{0} = i").noTest() + + T.Section("Table 3-18 Vector Shift Instructions", 27) + T.Shift(0xE5, "vsll", "VSLL", T_u64, "{0} = {1} << ({2} & 0x3f)") + T.ShiftPacked(0xE5, "vsll", "VSLL", T_u32, "{0} = {1} << ({2} & 0x1f)") + T.NoImpl("VSLD") + T.Shift(0xF5, "vsrl", "VSRL", T_u64, "{0} = {1} >> ({2} & 0x3f)") + T.ShiftPacked(0xF5, "vsrl", "VSRL", T_u32, "{0} = {1} >> ({2} & 0x1f)") + T.NoImpl("VSRD") + T.Shift(0xE6, "vsla.w", "VSLA", T_i32, "{0} = {1} << ({2} & 0x1f)") + T.ShiftPacked(0xE6, "vsla", "VSLA", T_i32, "{0} = {1} << ({2} & 0x1f)") + T.Shift(0xD4, "vsla.l", "VSLAX", T_i64, "{0} = {1} << ({2} & 0x3f)") + T.Shift(0xF6, "vsra.w", "VSRA", T_i32, "{0} = {1} >> ({2} & 0x1f)") + T.ShiftPacked(0xF6, "vsra", "VSRA", T_i32, "{0} = {1} >> ({2} & 0x1f)") + T.Shift(0xD5, "vsra.l", "VSRAX", T_i64, "{0} = {1} >> ({2} & 0x3f)") + + O_vsfa = [[VX(T_u64), VZ(T_u64), SY(T_u64), SZ(T_u64)],[VX(T_u64), VZ(T_u64), ImmI(T_u64), SZ(T_u64)]] + O_vsfa = T.addMask(O_vsfa) + T.Def(0xD7, "VSFA", "", "vsfa", O_vsfa, "{0} = ({1} << ({2} & 0x7)) + {3}") + + T.Section("Table 3-19 Vector Floating-Point Operation Instructions", 28) + T.Inst3f(0xCC, "vfadd", "VFAD", "", "{0} = {1} + {2}") + T.Inst3f(0xDC, "vfsub", "VFSB", "", "{0} = {1} - {2}") + T.Inst3f(0xCD, "vfmul", "VFMP", "", "{0} = {1} * {2}") + T.Inst3f(0xDD, "vfdiv", "VFDV", "", "{0} = {1} / {2}", 
False) + T.Inst2f(0xED, "vfsqrt", "VFSQRT", "{0} = std::sqrt({1})", False) + T.Inst3f(0xFC, "vfcmp", "VFCP", "", "{0} = compare({1}, {2})") + T.Inst3f(0xBD, "vfmax", "VFCM", "a", "{0} = max({1}, {2})") + T.Inst3f(0xBD, "vfmin", "VFCM", "i", "{0} = min({1}, {2})") + T.Inst4f(0xE2, "vfmad", "VFMAD", "{0} = {2} * {3} + {1}") + T.Inst4f(0xF2, "vfmsb", "VFMSB", "{0} = {2} * {3} - {1}") + T.Inst4f(0xE3, "vfnmad", "VFNMAD", "{0} = - ({2} * {3} + {1})") + T.Inst4f(0xF3, "vfnmsb", "VFNMSB", "{0} = - ({2} * {3} - {1})") + T.Inst2f(0xE1, "vrcp", "VRCP", "{0} = 1.0f / {1}") + T.Inst2f(0xF1, "vrsqrt", "VRSQRT", "{0} = 1.0f / std::sqrt({1})", True, True) + T.VFIX(0xE8, "VFIX", "dsx", "vcvt.w.d.sx", [[VX(T_i32), VY(T_f64)]], "int") + T.VFIX(0xE8, "VFIX", "dzx", "vcvt.w.d.zx", [[VX(T_i32), VY(T_f64)]], "unsigned int") + T.VFIX(0xE8, "VFIX", "ssx", "vcvt.w.s.sx", [[VX(T_i32), VY(T_f32)]], "int") + T.VFIX(0xE8, "VFIX", "szx", "vcvt.w.s.zx", [[VX(T_i32), VY(T_f32)]], "unsigned int") + T.VFIX(0xE8, "VFIX", "p", "pvcvt.w.s", [[VX(T_i32), VY(T_f32)]], "int") + T.VFIX(0xA8, "VFIXX", "", "vcvt.l.d", [[VX(T_i64), VY(T_f64)]], "long long") + T.Def(0xF8, "VFLT", "d", "vcvt.d.w", [[VX(T_f64), VY(T_i32)]], "{0} = (double){1}") + T.Def(0xF8, "VFLT", "s", "vcvt.s.w", [[VX(T_f32), VY(T_i32)]], "{0} = (float){1}") + T.Def(0xF8, "VFLT", "p", "pvcvt.s.w", [[VX(T_f32), VY(T_i32)]], "{0} = (float){1}") + T.Def(0xB8, "VFLTX", "", "vcvt.d.l", [[VX(T_f64), VY(T_i64)]], "{0} = (double){1}") + T.Def(0x8F, "VCVD", "", "vcvt.d.s", [[VX(T_f64), VY(T_f32)]], "{0} = (double){1}") + T.Def(0x9F, "VCVS", "", "vcvt.s.d", [[VX(T_f32), VY(T_f64)]], "{0} = (float){1}") + + T.Section("Table 3-20 Vector Mask Arithmetic Instructions", 32) + T.Def(0xD6, "VMRG", "", "vmrg", [[VX(T_u64), VY(T_u64), VZ(T_u64), VM]]).noTest() + T.Def(0xD6, "VMRG", "", "vmrg", [[VX(T_u64), SY(T_u64), VZ(T_u64), VM]]).noTest() + T.Def(0xD6, "VMRG", "", "vmrg", [[VX(T_u64), ImmI(T_u64), VZ(T_u64), VM]]).noTest() + T.Def(0xD6, "VMRG", "p", "vmrg.w", [[VX(T_u32), VY(T_u32), VZ(T_u32), VM512]]).noTest() + T.Def(0xD6, "VMRG", "p", "vmrg.w", [[VX(T_u32), SY(T_u32), VZ(T_u32), VM512]]).noTest() + T.Def(0xBC, "VSHF", "", "vshf", [[VX(T_u64), VY(T_u64), VZ(T_u64), SY(T_u64)], [VX(T_u64), VY(T_u64), VZ(T_u64), ImmN(T_u64)]]) + T.Def(0x8D, "VCP", "", "vcp", [[VX(T_u64), VZ(T_u64), VM, VD(T_u64)]]).noTest() + T.Def(0x9D, "VEX", "", "vex", [[VX(T_u64), VZ(T_u64), VM, VD(T_u64)]]).noTest() + + tmp = ["gt", "lt", "ne", "eq", "ge", "le", "num", "nan", "gtnan", "ltnan", "nenan", "eqnan", "genan", "lenan"] + T.Def(0xB4, "VFMK", "", "vfmk.l.at", [[VMX]]).noTest() + T.Def(0xB4, "VFMK", "", "vfmk.l.af", [[VMX]]).noTest() + T.Def(0xB5, "VFMK", "", "pvfmk.w.lo.at", [[VMX]]).noTest() + T.Def(0xB5, "VFMK", "", "pvfmk.w.up.at", [[VMX]]).noTest() + T.Def(0xB5, "VFMK", "", "pvfmk.w.lo.af", [[VMX]]).noTest() + T.Def(0xB5, "VFMK", "", "pvfmk.w.up.af", [[VMX]]).noTest() + T.Def(None, "VFMK", "pat", "pvfmk.at", [[VMX512]]).noTest() # Pseudo + T.Def(None, "VFMK", "paf", "pvfmk.af", [[VMX512]]).noTest() # Pseudo + for cc in tmp: + T.Def(0xB4, "VFMK", "", "vfmk.l."+cc, [[VMX, VZ(T_i64)]]).noTest() + T.Def(0xB4, "VFMK", "", "vfmk.l."+cc, [[VMX, VZ(T_i64), VM]]).noTest() + for cc in tmp: + T.Def(0xB5, "VFMS", "", "vfmk.w."+cc, [[VMX, VZ(T_i32)]]).noTest() + T.Def(0xB5, "VFMS", "", "vfmk.w."+cc, [[VMX, VZ(T_i32), VM]]).noTest() + for cc in tmp: + T.Def(0xB5, "VFMS", "", "pvfmk.w.lo."+cc, [[VMX, VZ(T_i32)]]).noTest() + T.Def(0xB5, "VFMS", "", "pvfmk.w.up."+cc, [[VMX, VZ(T_i32)]]).noTest() + T.Def(0xB5, 
"VFMS", "", "pvfmk.w.lo."+cc, [[VMX, VZ(T_i32), VM]]).noTest() + T.Def(0xB5, "VFMS", "", "pvfmk.w.up."+cc, [[VMX, VZ(T_i32), VM]]).noTest() + for cc in tmp: + T.Def(None, "VFMS", "p", "pvfmk.w."+cc, [[VMX512, VZ(T_i32)]]).noTest() # Pseudo + T.Def(None, "VFMS", "p", "pvfmk.w."+cc, [[VMX512, VZ(T_i32), VM512]]).noTest() # Pseudo + for cc in tmp: + T.Def(0xB6, "VFMF", "d", "vfmk.d."+cc, [[VMX, VZ(T_f64)]]).noTest() + T.Def(0xB6, "VFMF", "d", "vfmk.d."+cc, [[VMX, VZ(T_f64), VM]]).noTest() + for cc in tmp: + T.Def(0xB6, "VFMF", "s", "vfmk.s."+cc, [[VMX, VZ(T_f32)]]).noTest() + T.Def(0xB6, "VFMF", "s", "vfmk.s."+cc, [[VMX, VZ(T_f32), VM]]).noTest() + for cc in tmp: + T.Def(0xB6, "VFMF", "s", "pvfmk.s.lo."+cc, [[VMX, VZ(T_f32)]]).noTest() + T.Def(0xB6, "VFMF", "s", "pvfmk.s.up."+cc, [[VMX, VZ(T_f32)]]).noTest() + T.Def(0xB6, "VFMF", "s", "pvfmk.s.lo."+cc, [[VMX, VZ(T_f32), VM]]).noTest() + T.Def(0xB6, "VFMF", "s", "pvfmk.s.up."+cc, [[VMX, VZ(T_f32), VM]]).noTest() + for cc in tmp: + T.Def(None, "VFMF", "p", "pvfmk.s."+cc, [[VMX512, VZ(T_f32)]]).noTest() # Pseudo + T.Def(None, "VFMF", "p", "pvfmk.s."+cc, [[VMX512, VZ(T_f32), VM512]]).noTest() # Pseudo + + T.Section("Table 3-21 Vector Recursive Relation Instructions", 32) + T.VSUM(0xEA, "VSUMS", "sx", "vsum.w.sx", [[VX(T_i32), VY(T_i32)]]) + T.VSUM(0xEA, "VSUMS", "zx", "vsum.w.zx", [[VX(T_i32), VY(T_i32)]]) + T.VSUM(0xAA, "VSUMX", "", "vsum.l", [[VX(T_i64), VY(T_i64)]]) + T.VSUM(0xEC, "VFSUM", "d", "vfsum.d", [[VX(T_f64), VY(T_f64)]]) + T.VSUM(0xEC, "VFSUM", "s", "vfsum.s", [[VX(T_f32), VY(T_f32)]]) + T.FLm(0xBB, "VMAXS", "a{fl}sx", "vrmaxs.w{fl}.sx", [[VX(T_i32), VY(T_i32)]]) + T.FLm(0xBB, "VMAXS", "a{fl}zx", "vrmaxs.w{fl}.zx", [[VX(T_u32), VY(T_u32)]]) + T.FLm(0xBB, "VMAXS", "i{fl}sx", "vrmins.w{fl}.sx", [[VX(T_i32), VY(T_i32)]]) + T.FLm(0xBB, "VMAXS", "i{fl}zx", "vrmins.w{fl}.zx", [[VX(T_u32), VY(T_u32)]]) + T.FLm(0xAB, "VMAXX", "a{fl}", "vrmaxs.l{fl}", [[VX(T_i64), VY(T_i64)]]) + T.FLm(0xAB, "VMAXX", "i{fl}", "vrmins.l{fl}", [[VX(T_i64), VY(T_i64)]]) + T.FLm(0xAD, "VFMAX", "ad{fl}", "vfrmax.d{fl}", [[VX(T_f64), VY(T_f64)]]) + T.FLm(0xAD, "VFMAX", "as{fl}", "vfrmax.s{fl}", [[VX(T_f32), VY(T_f32)]]) + T.FLm(0xAD, "VFMAX", "id{fl}", "vfrmin.d{fl}", [[VX(T_f64), VY(T_f64)]]) + T.FLm(0xAD, "VFMAX", "is{fl}", "vfrmin.s{fl}", [[VX(T_f32), VY(T_f32)]]) + T.VSUM(0x88, "VRAND", "", "vrand", [[VX(T_u64), VY(T_u64)]]) + T.VSUM(0x98, "VROR", "", "vror", [[VX(T_u64), VY(T_u64)]]) + T.VSUM(0x89, "VRXOR", "", "vrxor", [[VX(T_u64), VY(T_u64)]]) + T.NoImpl("VFIA") + T.NoImpl("VFIS") + T.NoImpl("VFIM") + T.NoImpl("VFIAM") + T.NoImpl("VFISM") + T.NoImpl("VFIMA") + T.NoImpl("VFIMS") + + T.Section("Table 3-22 Vector Gathering/Scattering Instructions", 34) + T.VGTm(0xA1, "VGT", "", "vgt") + T.VGTm(0xA2, "VGTU", "", "vgtu") + T.VGTm(0xA3, "VGTL", "sx", "vgtl.sx") + T.VGTm(0xA3, "VGTL", "zx", "vgtl.zx") + T.VSCm(0xB1, "VSC", "VSC", "vsc") + T.VSCm(0xB2, "VSCU", "VSCU", "vscu") + T.VSCm(0xB3, "VSCL", "VSCL", "vscl") + + T.Section("Table 3-23 Vector Mask Register Instructions", 34) + T.Def(0x84, "ANDM", "", "andm", [[VMX, VMY, VMZ]], "{0} = {1} & {2}", noVL=True) + T.Def(None, "ANDM", "p", "andm", [[VMX512, VMY512, VMZ512]], "{0} = {1} & {2}", noVL=True) + T.Def(0x85, "ORM", "", "orm", [[VMX, VMY, VMZ]], "{0} = {1} | {2}", noVL=True) + T.Def(None, "ORM", "p", "orm", [[VMX512, VMY512, VMZ512]], "{0} = {1} | {2}", noVL=True) + T.Def(0x86, "XORM", "", "xorm", [[VMX, VMY, VMZ]], "{0} = {1} ^ {2}", noVL=True) + T.Def(None, "XORM", "p", "xorm", [[VMX512, VMY512, VMZ512]], 
"{0} = {1} ^ {2}", noVL=True) + T.Def(0x87, "EQVM", "", "eqvm", [[VMX, VMY, VMZ]], "{0} = ~({1} ^ {2})", noVL=True) + T.Def(None, "EQVM", "p", "eqvm", [[VMX512, VMY512, VMZ512]], "{0} = ~({1} ^ {2})", noVL=True) + T.Def(0x94, "NNDM", "", "nndm", [[VMX, VMY, VMZ]], "{0} = (~{1}) & {2}", noVL=True) + T.Def(None, "NNDM", "p", "nndm", [[VMX512, VMY512, VMZ512]], "{0} = (~{1}) & {2}", noVL=True) + T.Def(0x95, "NEGM", "", "negm", [[VMX, VMY]], "{0} = ~{1}", noVL=True) + T.Def(None, "NEGM", "p", "negm", [[VMX512, VMY512]], "{0} = ~{1}", noVL=True) + T.Def(0xA4, "PCVM", "", "pcvm", [[SX(T_u64), VMY]]).noTest(); + T.Def(0xA5, "LZVM", "", "lzvm", [[SX(T_u64), VMY]]).noTest(); + T.Def(0xA6, "TOVM", "", "tovm", [[SX(T_u64), VMY]]).noTest(); + + T.Section("Table 3-24 Vector Control Instructions", 35) + T.NoImpl("SMVL") + T.NoImpl("LVIX") + + T.Section("Table 3-25 Control Instructions", 35) + T.Dummy(0x30, "SVOB", "void _vel_svob(void)", "svob"); + + T.Section("Approximate Operations", None) + T.Def(None, None, "", "approx_vfdivs", [[VX(T_f32), VY(T_f32), VZ(T_f32)]], expr="{0} = {1} / {2}", noPassThrough=True).noLLVM() + T.Def(None, None, "", "approx_vfdivs", [[VX(T_f32), SY(T_f32), VZ(T_f32)]], expr="{0} = {1} / {2}", noPassThrough=True).noLLVM() + T.Def(None, None, "", "approx_vfdivs", [[VX(T_f32), VY(T_f32), SZ(T_f32)]], expr="{0} = {1} / {2}", noPassThrough=True).noLLVM() + T.Def(None, None, "", "approx_vfdivd", [[VX(T_f64), SY(T_f64), VZ(T_f64)]], expr="{0} = {1} / {2}", noPassThrough=True).noLLVM() + T.Def(None, None, "", "approx_pvfdiv", [[VX(T_f32), VY(T_f32), VZ(T_f32)]], expr="{0} = {1} / {2}", noPassThrough=True).noLLVM() + T.Def(None, None, "", "approx_vfsqrtd", [[VX(T_f64), VY(T_f64)]], expr="{0} = sqrtf({1})", noPassThrough=True).noLLVM() + T.Def(None, None, "", "approx_vfsqrts", [[VX(T_f32), VY(T_f32)]], expr="{0} = sqrtf({1})", noPassThrough=True).noLLVM() + + T.Section("Others", None) + T.Dummy(None, "", "unsigned long int _vel_pack_f32p(float const* p0, float const* p1)", "ldu,ldl,or") + T.Dummy(None, "", "unsigned long int _vel_pack_f32a(float const* p)", "load and mul") + T.Dummy(None, "", "unsigned long int _vel_pack_i32(int a, int b)", "sll,add,or") + + T.Def(None, None, "", "vec_expf", [[VX(T_f32), VY(T_f32)]], "{0} = expf({1})").noBuiltin().noLLVMInstDefine().NYI() + T.Def(None, None, "", "vec_exp", [[VX(T_f64), VY(T_f64)]], "{0} = exp({1})").noBuiltin().noLLVMInstDefine().NYI() + T.Dummy(None, "", "__vm256 _vel_extract_vm512u(__vm512 vm)", "") + T.Dummy(None, "", "__vm256 _vel_extract_vm512l(__vm512 vm)", "") + T.Dummy(None, "", "__vm512 _vel_insert_vm512u(__vm512 vmx, __vm256 vmy)", "") + T.Dummy(None, "", "__vm512 _vel_insert_vm512l(__vm512 vmx, __vm256 vmy)", "") + + return T + +# +# End of instruction definition +# + +def cmpwrite(filename, data): + need_write = True + try: + with open(filename, "r") as f: + old = f.read() + need_write = old != data + except: + pass + if need_write: + print("write " + filename) + with open(filename, "w") as f: + f.write(data) + + +def gen_test(insts, directory): + for I in insts: + if I.hasPassThroughOp() and (not I.hasMask()): + continue + if I.hasTest(): + data = getTestGenerator(I).gen(I).definition() + if directory and (directory != "-"): + filename = "{}/{}.c".format(directory, I.intrinsicName()) + if I.hasImmOp(): + filename = "{}/{}_imm.c".format(directory, I.intrinsicName()) + cmpwrite(filename, data) + else: + print(data) + +def gen_inst_def(insts): + for I in insts: + if I.hasLLVMInstDefine(): + print(I.instDefine()) + +def 
gen_intrinsic_def(insts): + for I in insts: + if not I.hasImmOp() and I.hasIntrinsicDef(): + print(I.intrinsicDefine()) + +def gen_pattern(insts): + for I in insts: + if I.hasInst() and I.hasPat(): + print(I.pattern()) + +def gen_builtin(insts): + for I in insts: + if (not I.hasImmOp()) and I.hasBuiltin(): + print(I.builtin()) + +def gen_veintrin_h(insts): + for I in insts: + if (not I.hasImmOp()) and I.hasBuiltin(): + print(I.veintrin()) + +def gen_vl_index(insts): + print("default: return -1;") + for I in insts: + if I.hasLLVMInstDefine() and I.hasVLOp(): + index = len(I.outs) + I.ins.index(VL) + print("case VE::{}: return {};".format(I.llvmInst(), index)) + + +import argparse + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--intrin', dest="opt_intrin", action="store_true") + parser.add_argument('--inst', dest="opt_inst", action="store_true") + parser.add_argument('-p', "--pattern", dest="opt_pat", action="store_true") + parser.add_argument('-b', dest="opt_builtin", action="store_true") + parser.add_argument('--veintrin', dest="opt_veintrin", action="store_true") + parser.add_argument('--decl', dest="opt_decl", action="store_true") + parser.add_argument('-t', dest="opt_test", action="store_true") + parser.add_argument('-r', dest="opt_reference", action="store_true") + parser.add_argument('-f', dest="opt_filter", action="store") + parser.add_argument('-a', dest="opt_all", action="store_true") + parser.add_argument('--html', dest="opt_html", action="store_true") + parser.add_argument('--html-no-link', action="store_true") + parser.add_argument('-l', dest="opt_lowering", action="store_true") + parser.add_argument('--test-dir', default="../../llvm-ve-intrinsic-test/gen/tests") + parser.add_argument('--vl-index', action="store_true"); + args, others = parser.parse_known_args() + + T = createInstructionTable() + insts = T.insts() + + if args.opt_filter: + insts = [i for i in insts if re.search(args.opt_filter, i.intrinsicName())] + print("filter: {} -> {}".format(args.opt_filter, len(insts))) + + if args.opt_all: + args.opt_inst = True + args.opt_intrin = True + args.opt_pat = True + args.opt_builtin = True + args.opt_veintrin = True + args.opt_decl = True + args.opt_reference = True + args.opt_test = True + #args.opt_html = True + test_dir = None + + if args.opt_inst: + gen_inst_def(insts) + if args.opt_intrin: + gen_intrinsic_def(insts) + if args.opt_pat: + gen_pattern(insts) + if args.opt_builtin: + gen_builtin(insts) + if args.opt_veintrin: + gen_veintrin_h(insts) + if args.opt_decl: + for I in insts: + if I.hasTest(): + print(getTestGenerator(I).gen(I).decl()) + if args.opt_test: + gen_test(insts, args.test_dir) + if args.opt_reference: + print('#include <cmath>') + print('#include <algorithm>') + print('using namespace std;') + print('#include "../refutils.h"') + print('namespace ref {') + for I in insts: + if I.isNotYetImplemented(): + continue + if I.hasTest(): + f = getTestGenerator(I).gen(I).reference() + if f: + print(f) + continue + + if len(I.outs) > 0 and I.outs[0].isMask() and I.hasExpr(): + f = TestGeneratorMask().gen(I) + print(f.reference()) + continue + if I.hasTest() and I.hasExpr(): + print(TestGenerator().reference(I)) + print('}') + if args.opt_html: + HtmlManualPrinter().printAll(T, False) + if args.html_no_link: + HtmlManualPrinter().printAll(T, True) + if args.vl_index: + gen_vl_index(insts) + +if __name__ == "__main__": + main() diff --git a/llvm/lib/Target/VE/vl-index.inc b/llvm/lib/Target/VE/vl-index.inc new file mode 100644 --- /dev/null +++ 
b/llvm/lib/Target/VE/vl-index.inc @@ -0,0 +1,1673 @@ +default: return -1; +case VE::vld_vssl: return 3; +case VE::vld_vssvl: return 4; +case VE::vld_vIsl: return 3; +case VE::vld_vIsvl: return 4; +case VE::vldnc_vssl: return 3; +case VE::vldnc_vssvl: return 4; +case VE::vldnc_vIsl: return 3; +case VE::vldnc_vIsvl: return 4; +case VE::vldu_vssl: return 3; +case VE::vldu_vssvl: return 4; +case VE::vldu_vIsl: return 3; +case VE::vldu_vIsvl: return 4; +case VE::vldunc_vssl: return 3; +case VE::vldunc_vssvl: return 4; +case VE::vldunc_vIsl: return 3; +case VE::vldunc_vIsvl: return 4; +case VE::vldlsx_vssl: return 3; +case VE::vldlsx_vssvl: return 4; +case VE::vldlsx_vIsl: return 3; +case VE::vldlsx_vIsvl: return 4; +case VE::vldlsxnc_vssl: return 3; +case VE::vldlsxnc_vssvl: return 4; +case VE::vldlsxnc_vIsl: return 3; +case VE::vldlsxnc_vIsvl: return 4; +case VE::vldlzx_vssl: return 3; +case VE::vldlzx_vssvl: return 4; +case VE::vldlzx_vIsl: return 3; +case VE::vldlzx_vIsvl: return 4; +case VE::vldlzxnc_vssl: return 3; +case VE::vldlzxnc_vssvl: return 4; +case VE::vldlzxnc_vIsl: return 3; +case VE::vldlzxnc_vIsvl: return 4; +case VE::vld2d_vssl: return 3; +case VE::vld2d_vssvl: return 4; +case VE::vld2d_vIsl: return 3; +case VE::vld2d_vIsvl: return 4; +case VE::vld2dnc_vssl: return 3; +case VE::vld2dnc_vssvl: return 4; +case VE::vld2dnc_vIsl: return 3; +case VE::vld2dnc_vIsvl: return 4; +case VE::vldu2d_vssl: return 3; +case VE::vldu2d_vssvl: return 4; +case VE::vldu2d_vIsl: return 3; +case VE::vldu2d_vIsvl: return 4; +case VE::vldu2dnc_vssl: return 3; +case VE::vldu2dnc_vssvl: return 4; +case VE::vldu2dnc_vIsl: return 3; +case VE::vldu2dnc_vIsvl: return 4; +case VE::vldl2dsx_vssl: return 3; +case VE::vldl2dsx_vssvl: return 4; +case VE::vldl2dsx_vIsl: return 3; +case VE::vldl2dsx_vIsvl: return 4; +case VE::vldl2dsxnc_vssl: return 3; +case VE::vldl2dsxnc_vssvl: return 4; +case VE::vldl2dsxnc_vIsl: return 3; +case VE::vldl2dsxnc_vIsvl: return 4; +case VE::vldl2dzx_vssl: return 3; +case VE::vldl2dzx_vssvl: return 4; +case VE::vldl2dzx_vIsl: return 3; +case VE::vldl2dzx_vIsvl: return 4; +case VE::vldl2dzxnc_vssl: return 3; +case VE::vldl2dzxnc_vssvl: return 4; +case VE::vldl2dzxnc_vIsl: return 3; +case VE::vldl2dzxnc_vIsvl: return 4; +case VE::vst_vssl: return 3; +case VE::vst_vIsl: return 3; +case VE::vst_vssml: return 4; +case VE::vst_vIsml: return 4; +case VE::vstnc_vssl: return 3; +case VE::vstnc_vIsl: return 3; +case VE::vstnc_vssml: return 4; +case VE::vstnc_vIsml: return 4; +case VE::vstot_vssl: return 3; +case VE::vstot_vIsl: return 3; +case VE::vstot_vssml: return 4; +case VE::vstot_vIsml: return 4; +case VE::vstncot_vssl: return 3; +case VE::vstncot_vIsl: return 3; +case VE::vstncot_vssml: return 4; +case VE::vstncot_vIsml: return 4; +case VE::vstu_vssl: return 3; +case VE::vstu_vIsl: return 3; +case VE::vstu_vssml: return 4; +case VE::vstu_vIsml: return 4; +case VE::vstunc_vssl: return 3; +case VE::vstunc_vIsl: return 3; +case VE::vstunc_vssml: return 4; +case VE::vstunc_vIsml: return 4; +case VE::vstuot_vssl: return 3; +case VE::vstuot_vIsl: return 3; +case VE::vstuot_vssml: return 4; +case VE::vstuot_vIsml: return 4; +case VE::vstuncot_vssl: return 3; +case VE::vstuncot_vIsl: return 3; +case VE::vstuncot_vssml: return 4; +case VE::vstuncot_vIsml: return 4; +case VE::vstl_vssl: return 3; +case VE::vstl_vIsl: return 3; +case VE::vstl_vssml: return 4; +case VE::vstl_vIsml: return 4; +case VE::vstlnc_vssl: return 3; +case VE::vstlnc_vIsl: return 3; +case VE::vstlnc_vssml: return 4; +case 
VE::vstlnc_vIsml: return 4; +case VE::vstlot_vssl: return 3; +case VE::vstlot_vIsl: return 3; +case VE::vstlot_vssml: return 4; +case VE::vstlot_vIsml: return 4; +case VE::vstlncot_vssl: return 3; +case VE::vstlncot_vIsl: return 3; +case VE::vstlncot_vssml: return 4; +case VE::vstlncot_vIsml: return 4; +case VE::vst2d_vssl: return 3; +case VE::vst2d_vIsl: return 3; +case VE::vst2d_vssml: return 4; +case VE::vst2d_vIsml: return 4; +case VE::vst2dnc_vssl: return 3; +case VE::vst2dnc_vIsl: return 3; +case VE::vst2dnc_vssml: return 4; +case VE::vst2dnc_vIsml: return 4; +case VE::vst2dot_vssl: return 3; +case VE::vst2dot_vIsl: return 3; +case VE::vst2dot_vssml: return 4; +case VE::vst2dot_vIsml: return 4; +case VE::vst2dncot_vssl: return 3; +case VE::vst2dncot_vIsl: return 3; +case VE::vst2dncot_vssml: return 4; +case VE::vst2dncot_vIsml: return 4; +case VE::vstu2d_vssl: return 3; +case VE::vstu2d_vIsl: return 3; +case VE::vstu2d_vssml: return 4; +case VE::vstu2d_vIsml: return 4; +case VE::vstu2dnc_vssl: return 3; +case VE::vstu2dnc_vIsl: return 3; +case VE::vstu2dnc_vssml: return 4; +case VE::vstu2dnc_vIsml: return 4; +case VE::vstu2dot_vssl: return 3; +case VE::vstu2dot_vIsl: return 3; +case VE::vstu2dot_vssml: return 4; +case VE::vstu2dot_vIsml: return 4; +case VE::vstu2dncot_vssl: return 3; +case VE::vstu2dncot_vIsl: return 3; +case VE::vstu2dncot_vssml: return 4; +case VE::vstu2dncot_vIsml: return 4; +case VE::vstl2d_vssl: return 3; +case VE::vstl2d_vIsl: return 3; +case VE::vstl2d_vssml: return 4; +case VE::vstl2d_vIsml: return 4; +case VE::vstl2dnc_vssl: return 3; +case VE::vstl2dnc_vIsl: return 3; +case VE::vstl2dnc_vssml: return 4; +case VE::vstl2dnc_vIsml: return 4; +case VE::vstl2dot_vssl: return 3; +case VE::vstl2dot_vIsl: return 3; +case VE::vstl2dot_vssml: return 4; +case VE::vstl2dot_vIsml: return 4; +case VE::vstl2dncot_vssl: return 3; +case VE::vstl2dncot_vIsl: return 3; +case VE::vstl2dncot_vssml: return 4; +case VE::vstl2dncot_vIsml: return 4; +case VE::pfchv_ssl: return 2; +case VE::pfchv_Isl: return 2; +case VE::pfchvnc_ssl: return 2; +case VE::pfchvnc_Isl: return 2; +case VE::vbrd_vsl: return 2; +case VE::vbrd_vsvl: return 3; +case VE::vbrd_vsmvl: return 4; +case VE::vbrd_vIl: return 2; +case VE::vbrd_vIvl: return 3; +case VE::vbrd_vImvl: return 4; +case VE::vbrdu_vsl: return 2; +case VE::vbrdu_vsvl: return 3; +case VE::vbrdu_vsmvl: return 4; +case VE::vbrdl_vsl: return 2; +case VE::vbrdl_vsvl: return 3; +case VE::vbrdl_vsmvl: return 4; +case VE::vbrdl_vIl: return 2; +case VE::vbrdl_vIvl: return 3; +case VE::vbrdl_vImvl: return 4; +case VE::pvbrd_vsl: return 2; +case VE::pvbrd_vsvl: return 3; +case VE::pvbrd_vsMvl: return 4; +case VE::vmv_vsvl: return 3; +case VE::vmv_vsvvl: return 4; +case VE::vmv_vIvl: return 3; +case VE::vmv_vIvvl: return 4; +case VE::vmv_vsvmvl: return 5; +case VE::vmv_vIvmvl: return 5; +case VE::vaddul_vvvl: return 3; +case VE::vaddul_vvvvl: return 4; +case VE::vaddul_vsvl: return 3; +case VE::vaddul_vsvvl: return 4; +case VE::vaddul_vIvl: return 3; +case VE::vaddul_vIvvl: return 4; +case VE::vaddul_vvvmvl: return 5; +case VE::vaddul_vsvmvl: return 5; +case VE::vaddul_vIvmvl: return 5; +case VE::vadduw_vvvl: return 3; +case VE::vadduw_vvvvl: return 4; +case VE::vadduw_vsvl: return 3; +case VE::vadduw_vsvvl: return 4; +case VE::vadduw_vIvl: return 3; +case VE::vadduw_vIvvl: return 4; +case VE::vadduw_vvvmvl: return 5; +case VE::vadduw_vsvmvl: return 5; +case VE::vadduw_vIvmvl: return 5; +case VE::pvaddu_vvvl: return 3; +case VE::pvaddu_vvvvl: return 4; 
+case VE::pvaddu_vsvl: return 3; +case VE::pvaddu_vsvvl: return 4; +case VE::pvaddu_vvvMvl: return 5; +case VE::pvaddu_vsvMvl: return 5; +case VE::vaddswsx_vvvl: return 3; +case VE::vaddswsx_vvvvl: return 4; +case VE::vaddswsx_vsvl: return 3; +case VE::vaddswsx_vsvvl: return 4; +case VE::vaddswsx_vIvl: return 3; +case VE::vaddswsx_vIvvl: return 4; +case VE::vaddswsx_vvvmvl: return 5; +case VE::vaddswsx_vsvmvl: return 5; +case VE::vaddswsx_vIvmvl: return 5; +case VE::vaddswzx_vvvl: return 3; +case VE::vaddswzx_vvvvl: return 4; +case VE::vaddswzx_vsvl: return 3; +case VE::vaddswzx_vsvvl: return 4; +case VE::vaddswzx_vIvl: return 3; +case VE::vaddswzx_vIvvl: return 4; +case VE::vaddswzx_vvvmvl: return 5; +case VE::vaddswzx_vsvmvl: return 5; +case VE::vaddswzx_vIvmvl: return 5; +case VE::pvadds_vvvl: return 3; +case VE::pvadds_vvvvl: return 4; +case VE::pvadds_vsvl: return 3; +case VE::pvadds_vsvvl: return 4; +case VE::pvadds_vvvMvl: return 5; +case VE::pvadds_vsvMvl: return 5; +case VE::vaddsl_vvvl: return 3; +case VE::vaddsl_vvvvl: return 4; +case VE::vaddsl_vsvl: return 3; +case VE::vaddsl_vsvvl: return 4; +case VE::vaddsl_vIvl: return 3; +case VE::vaddsl_vIvvl: return 4; +case VE::vaddsl_vvvmvl: return 5; +case VE::vaddsl_vsvmvl: return 5; +case VE::vaddsl_vIvmvl: return 5; +case VE::vsubul_vvvl: return 3; +case VE::vsubul_vvvvl: return 4; +case VE::vsubul_vsvl: return 3; +case VE::vsubul_vsvvl: return 4; +case VE::vsubul_vIvl: return 3; +case VE::vsubul_vIvvl: return 4; +case VE::vsubul_vvvmvl: return 5; +case VE::vsubul_vsvmvl: return 5; +case VE::vsubul_vIvmvl: return 5; +case VE::vsubuw_vvvl: return 3; +case VE::vsubuw_vvvvl: return 4; +case VE::vsubuw_vsvl: return 3; +case VE::vsubuw_vsvvl: return 4; +case VE::vsubuw_vIvl: return 3; +case VE::vsubuw_vIvvl: return 4; +case VE::vsubuw_vvvmvl: return 5; +case VE::vsubuw_vsvmvl: return 5; +case VE::vsubuw_vIvmvl: return 5; +case VE::pvsubu_vvvl: return 3; +case VE::pvsubu_vvvvl: return 4; +case VE::pvsubu_vsvl: return 3; +case VE::pvsubu_vsvvl: return 4; +case VE::pvsubu_vvvMvl: return 5; +case VE::pvsubu_vsvMvl: return 5; +case VE::vsubswsx_vvvl: return 3; +case VE::vsubswsx_vvvvl: return 4; +case VE::vsubswsx_vsvl: return 3; +case VE::vsubswsx_vsvvl: return 4; +case VE::vsubswsx_vIvl: return 3; +case VE::vsubswsx_vIvvl: return 4; +case VE::vsubswsx_vvvmvl: return 5; +case VE::vsubswsx_vsvmvl: return 5; +case VE::vsubswsx_vIvmvl: return 5; +case VE::vsubswzx_vvvl: return 3; +case VE::vsubswzx_vvvvl: return 4; +case VE::vsubswzx_vsvl: return 3; +case VE::vsubswzx_vsvvl: return 4; +case VE::vsubswzx_vIvl: return 3; +case VE::vsubswzx_vIvvl: return 4; +case VE::vsubswzx_vvvmvl: return 5; +case VE::vsubswzx_vsvmvl: return 5; +case VE::vsubswzx_vIvmvl: return 5; +case VE::pvsubs_vvvl: return 3; +case VE::pvsubs_vvvvl: return 4; +case VE::pvsubs_vsvl: return 3; +case VE::pvsubs_vsvvl: return 4; +case VE::pvsubs_vvvMvl: return 5; +case VE::pvsubs_vsvMvl: return 5; +case VE::vsubsl_vvvl: return 3; +case VE::vsubsl_vvvvl: return 4; +case VE::vsubsl_vsvl: return 3; +case VE::vsubsl_vsvvl: return 4; +case VE::vsubsl_vIvl: return 3; +case VE::vsubsl_vIvvl: return 4; +case VE::vsubsl_vvvmvl: return 5; +case VE::vsubsl_vsvmvl: return 5; +case VE::vsubsl_vIvmvl: return 5; +case VE::vmulul_vvvl: return 3; +case VE::vmulul_vvvvl: return 4; +case VE::vmulul_vsvl: return 3; +case VE::vmulul_vsvvl: return 4; +case VE::vmulul_vIvl: return 3; +case VE::vmulul_vIvvl: return 4; +case VE::vmulul_vvvmvl: return 5; +case VE::vmulul_vsvmvl: return 5; +case 
VE::vmulul_vIvmvl: return 5; +case VE::vmuluw_vvvl: return 3; +case VE::vmuluw_vvvvl: return 4; +case VE::vmuluw_vsvl: return 3; +case VE::vmuluw_vsvvl: return 4; +case VE::vmuluw_vIvl: return 3; +case VE::vmuluw_vIvvl: return 4; +case VE::vmuluw_vvvmvl: return 5; +case VE::vmuluw_vsvmvl: return 5; +case VE::vmuluw_vIvmvl: return 5; +case VE::vmulswsx_vvvl: return 3; +case VE::vmulswsx_vvvvl: return 4; +case VE::vmulswsx_vsvl: return 3; +case VE::vmulswsx_vsvvl: return 4; +case VE::vmulswsx_vIvl: return 3; +case VE::vmulswsx_vIvvl: return 4; +case VE::vmulswsx_vvvmvl: return 5; +case VE::vmulswsx_vsvmvl: return 5; +case VE::vmulswsx_vIvmvl: return 5; +case VE::vmulswzx_vvvl: return 3; +case VE::vmulswzx_vvvvl: return 4; +case VE::vmulswzx_vsvl: return 3; +case VE::vmulswzx_vsvvl: return 4; +case VE::vmulswzx_vIvl: return 3; +case VE::vmulswzx_vIvvl: return 4; +case VE::vmulswzx_vvvmvl: return 5; +case VE::vmulswzx_vsvmvl: return 5; +case VE::vmulswzx_vIvmvl: return 5; +case VE::vmulsl_vvvl: return 3; +case VE::vmulsl_vvvvl: return 4; +case VE::vmulsl_vsvl: return 3; +case VE::vmulsl_vsvvl: return 4; +case VE::vmulsl_vIvl: return 3; +case VE::vmulsl_vIvvl: return 4; +case VE::vmulsl_vvvmvl: return 5; +case VE::vmulsl_vsvmvl: return 5; +case VE::vmulsl_vIvmvl: return 5; +case VE::vmulslw_vvvl: return 3; +case VE::vmulslw_vvvvl: return 4; +case VE::vmulslw_vsvl: return 3; +case VE::vmulslw_vsvvl: return 4; +case VE::vmulslw_vIvl: return 3; +case VE::vmulslw_vIvvl: return 4; +case VE::vdivul_vvvl: return 3; +case VE::vdivul_vvvvl: return 4; +case VE::vdivul_vsvl: return 3; +case VE::vdivul_vsvvl: return 4; +case VE::vdivul_vIvl: return 3; +case VE::vdivul_vIvvl: return 4; +case VE::vdivul_vvvmvl: return 5; +case VE::vdivul_vsvmvl: return 5; +case VE::vdivul_vIvmvl: return 5; +case VE::vdivuw_vvvl: return 3; +case VE::vdivuw_vvvvl: return 4; +case VE::vdivuw_vsvl: return 3; +case VE::vdivuw_vsvvl: return 4; +case VE::vdivuw_vIvl: return 3; +case VE::vdivuw_vIvvl: return 4; +case VE::vdivuw_vvvmvl: return 5; +case VE::vdivuw_vsvmvl: return 5; +case VE::vdivuw_vIvmvl: return 5; +case VE::vdivul_vvsl: return 3; +case VE::vdivul_vvsvl: return 4; +case VE::vdivul_vvIl: return 3; +case VE::vdivul_vvIvl: return 4; +case VE::vdivul_vvsmvl: return 5; +case VE::vdivul_vvImvl: return 5; +case VE::vdivuw_vvsl: return 3; +case VE::vdivuw_vvsvl: return 4; +case VE::vdivuw_vvIl: return 3; +case VE::vdivuw_vvIvl: return 4; +case VE::vdivuw_vvsmvl: return 5; +case VE::vdivuw_vvImvl: return 5; +case VE::vdivswsx_vvvl: return 3; +case VE::vdivswsx_vvvvl: return 4; +case VE::vdivswsx_vsvl: return 3; +case VE::vdivswsx_vsvvl: return 4; +case VE::vdivswsx_vIvl: return 3; +case VE::vdivswsx_vIvvl: return 4; +case VE::vdivswsx_vvvmvl: return 5; +case VE::vdivswsx_vsvmvl: return 5; +case VE::vdivswsx_vIvmvl: return 5; +case VE::vdivswzx_vvvl: return 3; +case VE::vdivswzx_vvvvl: return 4; +case VE::vdivswzx_vsvl: return 3; +case VE::vdivswzx_vsvvl: return 4; +case VE::vdivswzx_vIvl: return 3; +case VE::vdivswzx_vIvvl: return 4; +case VE::vdivswzx_vvvmvl: return 5; +case VE::vdivswzx_vsvmvl: return 5; +case VE::vdivswzx_vIvmvl: return 5; +case VE::vdivswsx_vvsl: return 3; +case VE::vdivswsx_vvsvl: return 4; +case VE::vdivswsx_vvIl: return 3; +case VE::vdivswsx_vvIvl: return 4; +case VE::vdivswsx_vvsmvl: return 5; +case VE::vdivswsx_vvImvl: return 5; +case VE::vdivswzx_vvsl: return 3; +case VE::vdivswzx_vvsvl: return 4; +case VE::vdivswzx_vvIl: return 3; +case VE::vdivswzx_vvIvl: return 4; +case VE::vdivswzx_vvsmvl: 
return 5; +case VE::vdivswzx_vvImvl: return 5; +case VE::vdivsl_vvvl: return 3; +case VE::vdivsl_vvvvl: return 4; +case VE::vdivsl_vsvl: return 3; +case VE::vdivsl_vsvvl: return 4; +case VE::vdivsl_vIvl: return 3; +case VE::vdivsl_vIvvl: return 4; +case VE::vdivsl_vvvmvl: return 5; +case VE::vdivsl_vsvmvl: return 5; +case VE::vdivsl_vIvmvl: return 5; +case VE::vdivsl_vvsl: return 3; +case VE::vdivsl_vvsvl: return 4; +case VE::vdivsl_vvIl: return 3; +case VE::vdivsl_vvIvl: return 4; +case VE::vdivsl_vvsmvl: return 5; +case VE::vdivsl_vvImvl: return 5; +case VE::vcmpul_vvvl: return 3; +case VE::vcmpul_vvvvl: return 4; +case VE::vcmpul_vsvl: return 3; +case VE::vcmpul_vsvvl: return 4; +case VE::vcmpul_vIvl: return 3; +case VE::vcmpul_vIvvl: return 4; +case VE::vcmpul_vvvmvl: return 5; +case VE::vcmpul_vsvmvl: return 5; +case VE::vcmpul_vIvmvl: return 5; +case VE::vcmpuw_vvvl: return 3; +case VE::vcmpuw_vvvvl: return 4; +case VE::vcmpuw_vsvl: return 3; +case VE::vcmpuw_vsvvl: return 4; +case VE::vcmpuw_vIvl: return 3; +case VE::vcmpuw_vIvvl: return 4; +case VE::vcmpuw_vvvmvl: return 5; +case VE::vcmpuw_vsvmvl: return 5; +case VE::vcmpuw_vIvmvl: return 5; +case VE::pvcmpu_vvvl: return 3; +case VE::pvcmpu_vvvvl: return 4; +case VE::pvcmpu_vsvl: return 3; +case VE::pvcmpu_vsvvl: return 4; +case VE::pvcmpu_vvvMvl: return 5; +case VE::pvcmpu_vsvMvl: return 5; +case VE::vcmpswsx_vvvl: return 3; +case VE::vcmpswsx_vvvvl: return 4; +case VE::vcmpswsx_vsvl: return 3; +case VE::vcmpswsx_vsvvl: return 4; +case VE::vcmpswsx_vIvl: return 3; +case VE::vcmpswsx_vIvvl: return 4; +case VE::vcmpswsx_vvvmvl: return 5; +case VE::vcmpswsx_vsvmvl: return 5; +case VE::vcmpswsx_vIvmvl: return 5; +case VE::vcmpswzx_vvvl: return 3; +case VE::vcmpswzx_vvvvl: return 4; +case VE::vcmpswzx_vsvl: return 3; +case VE::vcmpswzx_vsvvl: return 4; +case VE::vcmpswzx_vIvl: return 3; +case VE::vcmpswzx_vIvvl: return 4; +case VE::vcmpswzx_vvvmvl: return 5; +case VE::vcmpswzx_vsvmvl: return 5; +case VE::vcmpswzx_vIvmvl: return 5; +case VE::pvcmps_vvvl: return 3; +case VE::pvcmps_vvvvl: return 4; +case VE::pvcmps_vsvl: return 3; +case VE::pvcmps_vsvvl: return 4; +case VE::pvcmps_vvvMvl: return 5; +case VE::pvcmps_vsvMvl: return 5; +case VE::vcmpsl_vvvl: return 3; +case VE::vcmpsl_vvvvl: return 4; +case VE::vcmpsl_vsvl: return 3; +case VE::vcmpsl_vsvvl: return 4; +case VE::vcmpsl_vIvl: return 3; +case VE::vcmpsl_vIvvl: return 4; +case VE::vcmpsl_vvvmvl: return 5; +case VE::vcmpsl_vsvmvl: return 5; +case VE::vcmpsl_vIvmvl: return 5; +case VE::vmaxswsx_vvvl: return 3; +case VE::vmaxswsx_vvvvl: return 4; +case VE::vmaxswsx_vsvl: return 3; +case VE::vmaxswsx_vsvvl: return 4; +case VE::vmaxswsx_vIvl: return 3; +case VE::vmaxswsx_vIvvl: return 4; +case VE::vmaxswsx_vvvmvl: return 5; +case VE::vmaxswsx_vsvmvl: return 5; +case VE::vmaxswsx_vIvmvl: return 5; +case VE::vmaxswzx_vvvl: return 3; +case VE::vmaxswzx_vvvvl: return 4; +case VE::vmaxswzx_vsvl: return 3; +case VE::vmaxswzx_vsvvl: return 4; +case VE::vmaxswzx_vIvl: return 3; +case VE::vmaxswzx_vIvvl: return 4; +case VE::vmaxswzx_vvvmvl: return 5; +case VE::vmaxswzx_vsvmvl: return 5; +case VE::vmaxswzx_vIvmvl: return 5; +case VE::pvmaxs_vvvl: return 3; +case VE::pvmaxs_vvvvl: return 4; +case VE::pvmaxs_vsvl: return 3; +case VE::pvmaxs_vsvvl: return 4; +case VE::pvmaxs_vvvMvl: return 5; +case VE::pvmaxs_vsvMvl: return 5; +case VE::vminswsx_vvvl: return 3; +case VE::vminswsx_vvvvl: return 4; +case VE::vminswsx_vsvl: return 3; +case VE::vminswsx_vsvvl: return 4; +case VE::vminswsx_vIvl: 
return 3; +case VE::vminswsx_vIvvl: return 4; +case VE::vminswsx_vvvmvl: return 5; +case VE::vminswsx_vsvmvl: return 5; +case VE::vminswsx_vIvmvl: return 5; +case VE::vminswzx_vvvl: return 3; +case VE::vminswzx_vvvvl: return 4; +case VE::vminswzx_vsvl: return 3; +case VE::vminswzx_vsvvl: return 4; +case VE::vminswzx_vIvl: return 3; +case VE::vminswzx_vIvvl: return 4; +case VE::vminswzx_vvvmvl: return 5; +case VE::vminswzx_vsvmvl: return 5; +case VE::vminswzx_vIvmvl: return 5; +case VE::pvmins_vvvl: return 3; +case VE::pvmins_vvvvl: return 4; +case VE::pvmins_vsvl: return 3; +case VE::pvmins_vsvvl: return 4; +case VE::pvmins_vvvMvl: return 5; +case VE::pvmins_vsvMvl: return 5; +case VE::vmaxsl_vvvl: return 3; +case VE::vmaxsl_vvvvl: return 4; +case VE::vmaxsl_vsvl: return 3; +case VE::vmaxsl_vsvvl: return 4; +case VE::vmaxsl_vIvl: return 3; +case VE::vmaxsl_vIvvl: return 4; +case VE::vmaxsl_vvvmvl: return 5; +case VE::vmaxsl_vsvmvl: return 5; +case VE::vmaxsl_vIvmvl: return 5; +case VE::vminsl_vvvl: return 3; +case VE::vminsl_vvvvl: return 4; +case VE::vminsl_vsvl: return 3; +case VE::vminsl_vsvvl: return 4; +case VE::vminsl_vIvl: return 3; +case VE::vminsl_vIvvl: return 4; +case VE::vminsl_vvvmvl: return 5; +case VE::vminsl_vsvmvl: return 5; +case VE::vminsl_vIvmvl: return 5; +case VE::vand_vvvl: return 3; +case VE::vand_vvvvl: return 4; +case VE::vand_vsvl: return 3; +case VE::vand_vsvvl: return 4; +case VE::vand_vvvmvl: return 5; +case VE::vand_vsvmvl: return 5; +case VE::pvandlo_vvvl: return 3; +case VE::pvandlo_vvvvl: return 4; +case VE::pvandlo_vsvl: return 3; +case VE::pvandlo_vsvvl: return 4; +case VE::pvandlo_vvvMvl: return 5; +case VE::pvandlo_vsvMvl: return 5; +case VE::pvandup_vvvl: return 3; +case VE::pvandup_vvvvl: return 4; +case VE::pvandup_vsvl: return 3; +case VE::pvandup_vsvvl: return 4; +case VE::pvandup_vvvMvl: return 5; +case VE::pvandup_vsvMvl: return 5; +case VE::pvand_vvvl: return 3; +case VE::pvand_vvvvl: return 4; +case VE::pvand_vsvl: return 3; +case VE::pvand_vsvvl: return 4; +case VE::pvand_vvvMvl: return 5; +case VE::pvand_vsvMvl: return 5; +case VE::vor_vvvl: return 3; +case VE::vor_vvvvl: return 4; +case VE::vor_vsvl: return 3; +case VE::vor_vsvvl: return 4; +case VE::vor_vvvmvl: return 5; +case VE::vor_vsvmvl: return 5; +case VE::pvorlo_vvvl: return 3; +case VE::pvorlo_vvvvl: return 4; +case VE::pvorlo_vsvl: return 3; +case VE::pvorlo_vsvvl: return 4; +case VE::pvorlo_vvvMvl: return 5; +case VE::pvorlo_vsvMvl: return 5; +case VE::pvorup_vvvl: return 3; +case VE::pvorup_vvvvl: return 4; +case VE::pvorup_vsvl: return 3; +case VE::pvorup_vsvvl: return 4; +case VE::pvorup_vvvMvl: return 5; +case VE::pvorup_vsvMvl: return 5; +case VE::pvor_vvvl: return 3; +case VE::pvor_vvvvl: return 4; +case VE::pvor_vsvl: return 3; +case VE::pvor_vsvvl: return 4; +case VE::pvor_vvvMvl: return 5; +case VE::pvor_vsvMvl: return 5; +case VE::vxor_vvvl: return 3; +case VE::vxor_vvvvl: return 4; +case VE::vxor_vsvl: return 3; +case VE::vxor_vsvvl: return 4; +case VE::vxor_vvvmvl: return 5; +case VE::vxor_vsvmvl: return 5; +case VE::pvxorlo_vvvl: return 3; +case VE::pvxorlo_vvvvl: return 4; +case VE::pvxorlo_vsvl: return 3; +case VE::pvxorlo_vsvvl: return 4; +case VE::pvxorlo_vvvMvl: return 5; +case VE::pvxorlo_vsvMvl: return 5; +case VE::pvxorup_vvvl: return 3; +case VE::pvxorup_vvvvl: return 4; +case VE::pvxorup_vsvl: return 3; +case VE::pvxorup_vsvvl: return 4; +case VE::pvxorup_vvvMvl: return 5; +case VE::pvxorup_vsvMvl: return 5; +case VE::pvxor_vvvl: return 3; +case 
VE::pvxor_vvvvl: return 4; +case VE::pvxor_vsvl: return 3; +case VE::pvxor_vsvvl: return 4; +case VE::pvxor_vvvMvl: return 5; +case VE::pvxor_vsvMvl: return 5; +case VE::veqv_vvvl: return 3; +case VE::veqv_vvvvl: return 4; +case VE::veqv_vsvl: return 3; +case VE::veqv_vsvvl: return 4; +case VE::veqv_vvvmvl: return 5; +case VE::veqv_vsvmvl: return 5; +case VE::pveqvlo_vvvl: return 3; +case VE::pveqvlo_vvvvl: return 4; +case VE::pveqvlo_vsvl: return 3; +case VE::pveqvlo_vsvvl: return 4; +case VE::pveqvlo_vvvMvl: return 5; +case VE::pveqvlo_vsvMvl: return 5; +case VE::pveqvup_vvvl: return 3; +case VE::pveqvup_vvvvl: return 4; +case VE::pveqvup_vsvl: return 3; +case VE::pveqvup_vsvvl: return 4; +case VE::pveqvup_vvvMvl: return 5; +case VE::pveqvup_vsvMvl: return 5; +case VE::pveqv_vvvl: return 3; +case VE::pveqv_vvvvl: return 4; +case VE::pveqv_vsvl: return 3; +case VE::pveqv_vsvvl: return 4; +case VE::pveqv_vvvMvl: return 5; +case VE::pveqv_vsvMvl: return 5; +case VE::vseq_vl: return 1; +case VE::vseq_vvl: return 2; +case VE::pvseqlo_vl: return 1; +case VE::pvseqlo_vvl: return 2; +case VE::pvsequp_vl: return 1; +case VE::pvsequp_vvl: return 2; +case VE::pvseq_vl: return 1; +case VE::pvseq_vvl: return 2; +case VE::vsll_vvvl: return 3; +case VE::vsll_vvvvl: return 4; +case VE::vsll_vvsl: return 3; +case VE::vsll_vvsvl: return 4; +case VE::vsll_vvIl: return 3; +case VE::vsll_vvIvl: return 4; +case VE::vsll_vvvmvl: return 5; +case VE::vsll_vvsmvl: return 5; +case VE::vsll_vvImvl: return 5; +case VE::pvslllo_vvvl: return 3; +case VE::pvslllo_vvvvl: return 4; +case VE::pvslllo_vvsl: return 3; +case VE::pvslllo_vvsvl: return 4; +case VE::pvslllo_vvvMvl: return 5; +case VE::pvslllo_vvsMvl: return 5; +case VE::pvsllup_vvvl: return 3; +case VE::pvsllup_vvvvl: return 4; +case VE::pvsllup_vvsl: return 3; +case VE::pvsllup_vvsvl: return 4; +case VE::pvsllup_vvvMvl: return 5; +case VE::pvsllup_vvsMvl: return 5; +case VE::pvsll_vvvl: return 3; +case VE::pvsll_vvvvl: return 4; +case VE::pvsll_vvsl: return 3; +case VE::pvsll_vvsvl: return 4; +case VE::pvsll_vvvMvl: return 5; +case VE::pvsll_vvsMvl: return 5; +case VE::vsrl_vvvl: return 3; +case VE::vsrl_vvvvl: return 4; +case VE::vsrl_vvsl: return 3; +case VE::vsrl_vvsvl: return 4; +case VE::vsrl_vvIl: return 3; +case VE::vsrl_vvIvl: return 4; +case VE::vsrl_vvvmvl: return 5; +case VE::vsrl_vvsmvl: return 5; +case VE::vsrl_vvImvl: return 5; +case VE::pvsrllo_vvvl: return 3; +case VE::pvsrllo_vvvvl: return 4; +case VE::pvsrllo_vvsl: return 3; +case VE::pvsrllo_vvsvl: return 4; +case VE::pvsrllo_vvvMvl: return 5; +case VE::pvsrllo_vvsMvl: return 5; +case VE::pvsrlup_vvvl: return 3; +case VE::pvsrlup_vvvvl: return 4; +case VE::pvsrlup_vvsl: return 3; +case VE::pvsrlup_vvsvl: return 4; +case VE::pvsrlup_vvvMvl: return 5; +case VE::pvsrlup_vvsMvl: return 5; +case VE::pvsrl_vvvl: return 3; +case VE::pvsrl_vvvvl: return 4; +case VE::pvsrl_vvsl: return 3; +case VE::pvsrl_vvsvl: return 4; +case VE::pvsrl_vvvMvl: return 5; +case VE::pvsrl_vvsMvl: return 5; +case VE::vslaw_vvvl: return 3; +case VE::vslaw_vvvvl: return 4; +case VE::vslaw_vvsl: return 3; +case VE::vslaw_vvsvl: return 4; +case VE::vslaw_vvIl: return 3; +case VE::vslaw_vvIvl: return 4; +case VE::vslaw_vvvmvl: return 5; +case VE::vslaw_vvsmvl: return 5; +case VE::vslaw_vvImvl: return 5; +case VE::pvslalo_vvvl: return 3; +case VE::pvslalo_vvvvl: return 4; +case VE::pvslalo_vvsl: return 3; +case VE::pvslalo_vvsvl: return 4; +case VE::pvslalo_vvvMvl: return 5; +case VE::pvslalo_vvsMvl: return 5; +case 
VE::pvslaup_vvvl: return 3; +case VE::pvslaup_vvvvl: return 4; +case VE::pvslaup_vvsl: return 3; +case VE::pvslaup_vvsvl: return 4; +case VE::pvslaup_vvvMvl: return 5; +case VE::pvslaup_vvsMvl: return 5; +case VE::pvsla_vvvl: return 3; +case VE::pvsla_vvvvl: return 4; +case VE::pvsla_vvsl: return 3; +case VE::pvsla_vvsvl: return 4; +case VE::pvsla_vvvMvl: return 5; +case VE::pvsla_vvsMvl: return 5; +case VE::vslal_vvvl: return 3; +case VE::vslal_vvvvl: return 4; +case VE::vslal_vvsl: return 3; +case VE::vslal_vvsvl: return 4; +case VE::vslal_vvIl: return 3; +case VE::vslal_vvIvl: return 4; +case VE::vslal_vvvmvl: return 5; +case VE::vslal_vvsmvl: return 5; +case VE::vslal_vvImvl: return 5; +case VE::vsraw_vvvl: return 3; +case VE::vsraw_vvvvl: return 4; +case VE::vsraw_vvsl: return 3; +case VE::vsraw_vvsvl: return 4; +case VE::vsraw_vvIl: return 3; +case VE::vsraw_vvIvl: return 4; +case VE::vsraw_vvvmvl: return 5; +case VE::vsraw_vvsmvl: return 5; +case VE::vsraw_vvImvl: return 5; +case VE::pvsralo_vvvl: return 3; +case VE::pvsralo_vvvvl: return 4; +case VE::pvsralo_vvsl: return 3; +case VE::pvsralo_vvsvl: return 4; +case VE::pvsralo_vvvMvl: return 5; +case VE::pvsralo_vvsMvl: return 5; +case VE::pvsraup_vvvl: return 3; +case VE::pvsraup_vvvvl: return 4; +case VE::pvsraup_vvsl: return 3; +case VE::pvsraup_vvsvl: return 4; +case VE::pvsraup_vvvMvl: return 5; +case VE::pvsraup_vvsMvl: return 5; +case VE::pvsra_vvvl: return 3; +case VE::pvsra_vvvvl: return 4; +case VE::pvsra_vvsl: return 3; +case VE::pvsra_vvsvl: return 4; +case VE::pvsra_vvvMvl: return 5; +case VE::pvsra_vvsMvl: return 5; +case VE::vsral_vvvl: return 3; +case VE::vsral_vvvvl: return 4; +case VE::vsral_vvsl: return 3; +case VE::vsral_vvsvl: return 4; +case VE::vsral_vvIl: return 3; +case VE::vsral_vvIvl: return 4; +case VE::vsral_vvvmvl: return 5; +case VE::vsral_vvsmvl: return 5; +case VE::vsral_vvImvl: return 5; +case VE::vsfa_vvssl: return 4; +case VE::vsfa_vvssvl: return 5; +case VE::vsfa_vvIsl: return 4; +case VE::vsfa_vvIsvl: return 5; +case VE::vsfa_vvssmvl: return 6; +case VE::vsfa_vvIsmvl: return 6; +case VE::vfaddd_vvvl: return 3; +case VE::vfaddd_vvvvl: return 4; +case VE::vfaddd_vsvl: return 3; +case VE::vfaddd_vsvvl: return 4; +case VE::vfaddd_vvvmvl: return 5; +case VE::vfaddd_vsvmvl: return 5; +case VE::vfadds_vvvl: return 3; +case VE::vfadds_vvvvl: return 4; +case VE::vfadds_vsvl: return 3; +case VE::vfadds_vsvvl: return 4; +case VE::vfadds_vvvmvl: return 5; +case VE::vfadds_vsvmvl: return 5; +case VE::pvfadd_vvvl: return 3; +case VE::pvfadd_vvvvl: return 4; +case VE::pvfadd_vsvl: return 3; +case VE::pvfadd_vsvvl: return 4; +case VE::pvfadd_vvvMvl: return 5; +case VE::pvfadd_vsvMvl: return 5; +case VE::vfsubd_vvvl: return 3; +case VE::vfsubd_vvvvl: return 4; +case VE::vfsubd_vsvl: return 3; +case VE::vfsubd_vsvvl: return 4; +case VE::vfsubd_vvvmvl: return 5; +case VE::vfsubd_vsvmvl: return 5; +case VE::vfsubs_vvvl: return 3; +case VE::vfsubs_vvvvl: return 4; +case VE::vfsubs_vsvl: return 3; +case VE::vfsubs_vsvvl: return 4; +case VE::vfsubs_vvvmvl: return 5; +case VE::vfsubs_vsvmvl: return 5; +case VE::pvfsub_vvvl: return 3; +case VE::pvfsub_vvvvl: return 4; +case VE::pvfsub_vsvl: return 3; +case VE::pvfsub_vsvvl: return 4; +case VE::pvfsub_vvvMvl: return 5; +case VE::pvfsub_vsvMvl: return 5; +case VE::vfmuld_vvvl: return 3; +case VE::vfmuld_vvvvl: return 4; +case VE::vfmuld_vsvl: return 3; +case VE::vfmuld_vsvvl: return 4; +case VE::vfmuld_vvvmvl: return 5; +case VE::vfmuld_vsvmvl: return 5; +case 
VE::vfmuls_vvvl: return 3; +case VE::vfmuls_vvvvl: return 4; +case VE::vfmuls_vsvl: return 3; +case VE::vfmuls_vsvvl: return 4; +case VE::vfmuls_vvvmvl: return 5; +case VE::vfmuls_vsvmvl: return 5; +case VE::pvfmul_vvvl: return 3; +case VE::pvfmul_vvvvl: return 4; +case VE::pvfmul_vsvl: return 3; +case VE::pvfmul_vsvvl: return 4; +case VE::pvfmul_vvvMvl: return 5; +case VE::pvfmul_vsvMvl: return 5; +case VE::vfdivd_vvvl: return 3; +case VE::vfdivd_vvvvl: return 4; +case VE::vfdivd_vsvl: return 3; +case VE::vfdivd_vsvvl: return 4; +case VE::vfdivd_vvvmvl: return 5; +case VE::vfdivd_vsvmvl: return 5; +case VE::vfdivs_vvvl: return 3; +case VE::vfdivs_vvvvl: return 4; +case VE::vfdivs_vsvl: return 3; +case VE::vfdivs_vsvvl: return 4; +case VE::vfdivs_vvvmvl: return 5; +case VE::vfdivs_vsvmvl: return 5; +case VE::vfsqrtd_vvl: return 2; +case VE::vfsqrtd_vvvl: return 3; +case VE::vfsqrts_vvl: return 2; +case VE::vfsqrts_vvvl: return 3; +case VE::vfcmpd_vvvl: return 3; +case VE::vfcmpd_vvvvl: return 4; +case VE::vfcmpd_vsvl: return 3; +case VE::vfcmpd_vsvvl: return 4; +case VE::vfcmpd_vvvmvl: return 5; +case VE::vfcmpd_vsvmvl: return 5; +case VE::vfcmps_vvvl: return 3; +case VE::vfcmps_vvvvl: return 4; +case VE::vfcmps_vsvl: return 3; +case VE::vfcmps_vsvvl: return 4; +case VE::vfcmps_vvvmvl: return 5; +case VE::vfcmps_vsvmvl: return 5; +case VE::pvfcmp_vvvl: return 3; +case VE::pvfcmp_vvvvl: return 4; +case VE::pvfcmp_vsvl: return 3; +case VE::pvfcmp_vsvvl: return 4; +case VE::pvfcmp_vvvMvl: return 5; +case VE::pvfcmp_vsvMvl: return 5; +case VE::vfmaxd_vvvl: return 3; +case VE::vfmaxd_vvvvl: return 4; +case VE::vfmaxd_vsvl: return 3; +case VE::vfmaxd_vsvvl: return 4; +case VE::vfmaxd_vvvmvl: return 5; +case VE::vfmaxd_vsvmvl: return 5; +case VE::vfmaxs_vvvl: return 3; +case VE::vfmaxs_vvvvl: return 4; +case VE::vfmaxs_vsvl: return 3; +case VE::vfmaxs_vsvvl: return 4; +case VE::vfmaxs_vvvmvl: return 5; +case VE::vfmaxs_vsvmvl: return 5; +case VE::pvfmax_vvvl: return 3; +case VE::pvfmax_vvvvl: return 4; +case VE::pvfmax_vsvl: return 3; +case VE::pvfmax_vsvvl: return 4; +case VE::pvfmax_vvvMvl: return 5; +case VE::pvfmax_vsvMvl: return 5; +case VE::vfmind_vvvl: return 3; +case VE::vfmind_vvvvl: return 4; +case VE::vfmind_vsvl: return 3; +case VE::vfmind_vsvvl: return 4; +case VE::vfmind_vvvmvl: return 5; +case VE::vfmind_vsvmvl: return 5; +case VE::vfmins_vvvl: return 3; +case VE::vfmins_vvvvl: return 4; +case VE::vfmins_vsvl: return 3; +case VE::vfmins_vsvvl: return 4; +case VE::vfmins_vvvmvl: return 5; +case VE::vfmins_vsvmvl: return 5; +case VE::pvfmin_vvvl: return 3; +case VE::pvfmin_vvvvl: return 4; +case VE::pvfmin_vsvl: return 3; +case VE::pvfmin_vsvvl: return 4; +case VE::pvfmin_vvvMvl: return 5; +case VE::pvfmin_vsvMvl: return 5; +case VE::vfmadd_vvvvl: return 4; +case VE::vfmadd_vvvvvl: return 5; +case VE::vfmadd_vsvvl: return 4; +case VE::vfmadd_vsvvvl: return 5; +case VE::vfmadd_vvsvl: return 4; +case VE::vfmadd_vvsvvl: return 5; +case VE::vfmadd_vvvvmvl: return 6; +case VE::vfmadd_vsvvmvl: return 6; +case VE::vfmadd_vvsvmvl: return 6; +case VE::vfmads_vvvvl: return 4; +case VE::vfmads_vvvvvl: return 5; +case VE::vfmads_vsvvl: return 4; +case VE::vfmads_vsvvvl: return 5; +case VE::vfmads_vvsvl: return 4; +case VE::vfmads_vvsvvl: return 5; +case VE::vfmads_vvvvmvl: return 6; +case VE::vfmads_vsvvmvl: return 6; +case VE::vfmads_vvsvmvl: return 6; +case VE::pvfmad_vvvvl: return 4; +case VE::pvfmad_vvvvvl: return 5; +case VE::pvfmad_vsvvl: return 4; +case VE::pvfmad_vsvvvl: return 5; +case 
VE::pvfmad_vvsvl: return 4; +case VE::pvfmad_vvsvvl: return 5; +case VE::pvfmad_vvvvMvl: return 6; +case VE::pvfmad_vsvvMvl: return 6; +case VE::pvfmad_vvsvMvl: return 6; +case VE::vfmsbd_vvvvl: return 4; +case VE::vfmsbd_vvvvvl: return 5; +case VE::vfmsbd_vsvvl: return 4; +case VE::vfmsbd_vsvvvl: return 5; +case VE::vfmsbd_vvsvl: return 4; +case VE::vfmsbd_vvsvvl: return 5; +case VE::vfmsbd_vvvvmvl: return 6; +case VE::vfmsbd_vsvvmvl: return 6; +case VE::vfmsbd_vvsvmvl: return 6; +case VE::vfmsbs_vvvvl: return 4; +case VE::vfmsbs_vvvvvl: return 5; +case VE::vfmsbs_vsvvl: return 4; +case VE::vfmsbs_vsvvvl: return 5; +case VE::vfmsbs_vvsvl: return 4; +case VE::vfmsbs_vvsvvl: return 5; +case VE::vfmsbs_vvvvmvl: return 6; +case VE::vfmsbs_vsvvmvl: return 6; +case VE::vfmsbs_vvsvmvl: return 6; +case VE::pvfmsb_vvvvl: return 4; +case VE::pvfmsb_vvvvvl: return 5; +case VE::pvfmsb_vsvvl: return 4; +case VE::pvfmsb_vsvvvl: return 5; +case VE::pvfmsb_vvsvl: return 4; +case VE::pvfmsb_vvsvvl: return 5; +case VE::pvfmsb_vvvvMvl: return 6; +case VE::pvfmsb_vsvvMvl: return 6; +case VE::pvfmsb_vvsvMvl: return 6; +case VE::vfnmadd_vvvvl: return 4; +case VE::vfnmadd_vvvvvl: return 5; +case VE::vfnmadd_vsvvl: return 4; +case VE::vfnmadd_vsvvvl: return 5; +case VE::vfnmadd_vvsvl: return 4; +case VE::vfnmadd_vvsvvl: return 5; +case VE::vfnmadd_vvvvmvl: return 6; +case VE::vfnmadd_vsvvmvl: return 6; +case VE::vfnmadd_vvsvmvl: return 6; +case VE::vfnmads_vvvvl: return 4; +case VE::vfnmads_vvvvvl: return 5; +case VE::vfnmads_vsvvl: return 4; +case VE::vfnmads_vsvvvl: return 5; +case VE::vfnmads_vvsvl: return 4; +case VE::vfnmads_vvsvvl: return 5; +case VE::vfnmads_vvvvmvl: return 6; +case VE::vfnmads_vsvvmvl: return 6; +case VE::vfnmads_vvsvmvl: return 6; +case VE::pvfnmad_vvvvl: return 4; +case VE::pvfnmad_vvvvvl: return 5; +case VE::pvfnmad_vsvvl: return 4; +case VE::pvfnmad_vsvvvl: return 5; +case VE::pvfnmad_vvsvl: return 4; +case VE::pvfnmad_vvsvvl: return 5; +case VE::pvfnmad_vvvvMvl: return 6; +case VE::pvfnmad_vsvvMvl: return 6; +case VE::pvfnmad_vvsvMvl: return 6; +case VE::vfnmsbd_vvvvl: return 4; +case VE::vfnmsbd_vvvvvl: return 5; +case VE::vfnmsbd_vsvvl: return 4; +case VE::vfnmsbd_vsvvvl: return 5; +case VE::vfnmsbd_vvsvl: return 4; +case VE::vfnmsbd_vvsvvl: return 5; +case VE::vfnmsbd_vvvvmvl: return 6; +case VE::vfnmsbd_vsvvmvl: return 6; +case VE::vfnmsbd_vvsvmvl: return 6; +case VE::vfnmsbs_vvvvl: return 4; +case VE::vfnmsbs_vvvvvl: return 5; +case VE::vfnmsbs_vsvvl: return 4; +case VE::vfnmsbs_vsvvvl: return 5; +case VE::vfnmsbs_vvsvl: return 4; +case VE::vfnmsbs_vvsvvl: return 5; +case VE::vfnmsbs_vvvvmvl: return 6; +case VE::vfnmsbs_vsvvmvl: return 6; +case VE::vfnmsbs_vvsvmvl: return 6; +case VE::pvfnmsb_vvvvl: return 4; +case VE::pvfnmsb_vvvvvl: return 5; +case VE::pvfnmsb_vsvvl: return 4; +case VE::pvfnmsb_vsvvvl: return 5; +case VE::pvfnmsb_vvsvl: return 4; +case VE::pvfnmsb_vvsvvl: return 5; +case VE::pvfnmsb_vvvvMvl: return 6; +case VE::pvfnmsb_vsvvMvl: return 6; +case VE::pvfnmsb_vvsvMvl: return 6; +case VE::vrcpd_vvl: return 2; +case VE::vrcpd_vvvl: return 3; +case VE::vrcps_vvl: return 2; +case VE::vrcps_vvvl: return 3; +case VE::pvrcp_vvl: return 2; +case VE::pvrcp_vvvl: return 3; +case VE::vrsqrtd_vvl: return 2; +case VE::vrsqrtd_vvvl: return 3; +case VE::vrsqrts_vvl: return 2; +case VE::vrsqrts_vvvl: return 3; +case VE::pvrsqrt_vvl: return 2; +case VE::pvrsqrt_vvvl: return 3; +case VE::vrsqrtdnex_vvl: return 2; +case VE::vrsqrtdnex_vvvl: return 3; +case VE::vrsqrtsnex_vvl: 
return 2; +case VE::vrsqrtsnex_vvvl: return 3; +case VE::pvrsqrtnex_vvl: return 2; +case VE::pvrsqrtnex_vvvl: return 3; +case VE::vcvtwdsx_vvl: return 2; +case VE::vcvtwdsx_vvvl: return 3; +case VE::vcvtwdsx_vvmvl: return 4; +case VE::vcvtwdsxrz_vvl: return 2; +case VE::vcvtwdsxrz_vvvl: return 3; +case VE::vcvtwdsxrz_vvmvl: return 4; +case VE::vcvtwdzx_vvl: return 2; +case VE::vcvtwdzx_vvvl: return 3; +case VE::vcvtwdzx_vvmvl: return 4; +case VE::vcvtwdzxrz_vvl: return 2; +case VE::vcvtwdzxrz_vvvl: return 3; +case VE::vcvtwdzxrz_vvmvl: return 4; +case VE::vcvtwssx_vvl: return 2; +case VE::vcvtwssx_vvvl: return 3; +case VE::vcvtwssx_vvmvl: return 4; +case VE::vcvtwssxrz_vvl: return 2; +case VE::vcvtwssxrz_vvvl: return 3; +case VE::vcvtwssxrz_vvmvl: return 4; +case VE::vcvtwszx_vvl: return 2; +case VE::vcvtwszx_vvvl: return 3; +case VE::vcvtwszx_vvmvl: return 4; +case VE::vcvtwszxrz_vvl: return 2; +case VE::vcvtwszxrz_vvvl: return 3; +case VE::vcvtwszxrz_vvmvl: return 4; +case VE::pvcvtws_vvl: return 2; +case VE::pvcvtws_vvvl: return 3; +case VE::pvcvtws_vvMvl: return 4; +case VE::pvcvtwsrz_vvl: return 2; +case VE::pvcvtwsrz_vvvl: return 3; +case VE::pvcvtwsrz_vvMvl: return 4; +case VE::vcvtld_vvl: return 2; +case VE::vcvtld_vvvl: return 3; +case VE::vcvtld_vvmvl: return 4; +case VE::vcvtldrz_vvl: return 2; +case VE::vcvtldrz_vvvl: return 3; +case VE::vcvtldrz_vvmvl: return 4; +case VE::vcvtdw_vvl: return 2; +case VE::vcvtdw_vvvl: return 3; +case VE::vcvtsw_vvl: return 2; +case VE::vcvtsw_vvvl: return 3; +case VE::pvcvtsw_vvl: return 2; +case VE::pvcvtsw_vvvl: return 3; +case VE::vcvtdl_vvl: return 2; +case VE::vcvtdl_vvvl: return 3; +case VE::vcvtds_vvl: return 2; +case VE::vcvtds_vvvl: return 3; +case VE::vcvtsd_vvl: return 2; +case VE::vcvtsd_vvvl: return 3; +case VE::vmrg_vvvml: return 4; +case VE::vmrg_vvvmvl: return 5; +case VE::vmrg_vsvml: return 4; +case VE::vmrg_vsvmvl: return 5; +case VE::vmrg_vIvml: return 4; +case VE::vmrg_vIvmvl: return 5; +case VE::vmrgw_vvvMl: return 4; +case VE::vmrgw_vvvMvl: return 5; +case VE::vmrgw_vsvMl: return 4; +case VE::vmrgw_vsvMvl: return 5; +case VE::vshf_vvvsl: return 4; +case VE::vshf_vvvsvl: return 5; +case VE::vshf_vvvIl: return 4; +case VE::vshf_vvvIvl: return 5; +case VE::vcp_vvmvl: return 4; +case VE::vex_vvmvl: return 4; +case VE::vfmklat_ml: return 1; +case VE::vfmklaf_ml: return 1; +case VE::pvfmkwloat_ml: return 1; +case VE::pvfmkwupat_ml: return 1; +case VE::pvfmkwloaf_ml: return 1; +case VE::pvfmkwupaf_ml: return 1; +case VE::pvfmkat_Ml: return 1; +case VE::pvfmkaf_Ml: return 1; +case VE::vfmklgt_mvl: return 2; +case VE::vfmklgt_mvml: return 3; +case VE::vfmkllt_mvl: return 2; +case VE::vfmkllt_mvml: return 3; +case VE::vfmklne_mvl: return 2; +case VE::vfmklne_mvml: return 3; +case VE::vfmkleq_mvl: return 2; +case VE::vfmkleq_mvml: return 3; +case VE::vfmklge_mvl: return 2; +case VE::vfmklge_mvml: return 3; +case VE::vfmklle_mvl: return 2; +case VE::vfmklle_mvml: return 3; +case VE::vfmklnum_mvl: return 2; +case VE::vfmklnum_mvml: return 3; +case VE::vfmklnan_mvl: return 2; +case VE::vfmklnan_mvml: return 3; +case VE::vfmklgtnan_mvl: return 2; +case VE::vfmklgtnan_mvml: return 3; +case VE::vfmklltnan_mvl: return 2; +case VE::vfmklltnan_mvml: return 3; +case VE::vfmklnenan_mvl: return 2; +case VE::vfmklnenan_mvml: return 3; +case VE::vfmkleqnan_mvl: return 2; +case VE::vfmkleqnan_mvml: return 3; +case VE::vfmklgenan_mvl: return 2; +case VE::vfmklgenan_mvml: return 3; +case VE::vfmkllenan_mvl: return 2; +case VE::vfmkllenan_mvml: return 
3; +case VE::vfmkwgt_mvl: return 2; +case VE::vfmkwgt_mvml: return 3; +case VE::vfmkwlt_mvl: return 2; +case VE::vfmkwlt_mvml: return 3; +case VE::vfmkwne_mvl: return 2; +case VE::vfmkwne_mvml: return 3; +case VE::vfmkweq_mvl: return 2; +case VE::vfmkweq_mvml: return 3; +case VE::vfmkwge_mvl: return 2; +case VE::vfmkwge_mvml: return 3; +case VE::vfmkwle_mvl: return 2; +case VE::vfmkwle_mvml: return 3; +case VE::vfmkwnum_mvl: return 2; +case VE::vfmkwnum_mvml: return 3; +case VE::vfmkwnan_mvl: return 2; +case VE::vfmkwnan_mvml: return 3; +case VE::vfmkwgtnan_mvl: return 2; +case VE::vfmkwgtnan_mvml: return 3; +case VE::vfmkwltnan_mvl: return 2; +case VE::vfmkwltnan_mvml: return 3; +case VE::vfmkwnenan_mvl: return 2; +case VE::vfmkwnenan_mvml: return 3; +case VE::vfmkweqnan_mvl: return 2; +case VE::vfmkweqnan_mvml: return 3; +case VE::vfmkwgenan_mvl: return 2; +case VE::vfmkwgenan_mvml: return 3; +case VE::vfmkwlenan_mvl: return 2; +case VE::vfmkwlenan_mvml: return 3; +case VE::pvfmkwlogt_mvl: return 2; +case VE::pvfmkwupgt_mvl: return 2; +case VE::pvfmkwlogt_mvml: return 3; +case VE::pvfmkwupgt_mvml: return 3; +case VE::pvfmkwlolt_mvl: return 2; +case VE::pvfmkwuplt_mvl: return 2; +case VE::pvfmkwlolt_mvml: return 3; +case VE::pvfmkwuplt_mvml: return 3; +case VE::pvfmkwlone_mvl: return 2; +case VE::pvfmkwupne_mvl: return 2; +case VE::pvfmkwlone_mvml: return 3; +case VE::pvfmkwupne_mvml: return 3; +case VE::pvfmkwloeq_mvl: return 2; +case VE::pvfmkwupeq_mvl: return 2; +case VE::pvfmkwloeq_mvml: return 3; +case VE::pvfmkwupeq_mvml: return 3; +case VE::pvfmkwloge_mvl: return 2; +case VE::pvfmkwupge_mvl: return 2; +case VE::pvfmkwloge_mvml: return 3; +case VE::pvfmkwupge_mvml: return 3; +case VE::pvfmkwlole_mvl: return 2; +case VE::pvfmkwuple_mvl: return 2; +case VE::pvfmkwlole_mvml: return 3; +case VE::pvfmkwuple_mvml: return 3; +case VE::pvfmkwlonum_mvl: return 2; +case VE::pvfmkwupnum_mvl: return 2; +case VE::pvfmkwlonum_mvml: return 3; +case VE::pvfmkwupnum_mvml: return 3; +case VE::pvfmkwlonan_mvl: return 2; +case VE::pvfmkwupnan_mvl: return 2; +case VE::pvfmkwlonan_mvml: return 3; +case VE::pvfmkwupnan_mvml: return 3; +case VE::pvfmkwlogtnan_mvl: return 2; +case VE::pvfmkwupgtnan_mvl: return 2; +case VE::pvfmkwlogtnan_mvml: return 3; +case VE::pvfmkwupgtnan_mvml: return 3; +case VE::pvfmkwloltnan_mvl: return 2; +case VE::pvfmkwupltnan_mvl: return 2; +case VE::pvfmkwloltnan_mvml: return 3; +case VE::pvfmkwupltnan_mvml: return 3; +case VE::pvfmkwlonenan_mvl: return 2; +case VE::pvfmkwupnenan_mvl: return 2; +case VE::pvfmkwlonenan_mvml: return 3; +case VE::pvfmkwupnenan_mvml: return 3; +case VE::pvfmkwloeqnan_mvl: return 2; +case VE::pvfmkwupeqnan_mvl: return 2; +case VE::pvfmkwloeqnan_mvml: return 3; +case VE::pvfmkwupeqnan_mvml: return 3; +case VE::pvfmkwlogenan_mvl: return 2; +case VE::pvfmkwupgenan_mvl: return 2; +case VE::pvfmkwlogenan_mvml: return 3; +case VE::pvfmkwupgenan_mvml: return 3; +case VE::pvfmkwlolenan_mvl: return 2; +case VE::pvfmkwuplenan_mvl: return 2; +case VE::pvfmkwlolenan_mvml: return 3; +case VE::pvfmkwuplenan_mvml: return 3; +case VE::pvfmkwgt_Mvl: return 2; +case VE::pvfmkwgt_MvMl: return 3; +case VE::pvfmkwlt_Mvl: return 2; +case VE::pvfmkwlt_MvMl: return 3; +case VE::pvfmkwne_Mvl: return 2; +case VE::pvfmkwne_MvMl: return 3; +case VE::pvfmkweq_Mvl: return 2; +case VE::pvfmkweq_MvMl: return 3; +case VE::pvfmkwge_Mvl: return 2; +case VE::pvfmkwge_MvMl: return 3; +case VE::pvfmkwle_Mvl: return 2; +case VE::pvfmkwle_MvMl: return 3; +case VE::pvfmkwnum_Mvl: return 2; 
+case VE::pvfmkwnum_MvMl: return 3; +case VE::pvfmkwnan_Mvl: return 2; +case VE::pvfmkwnan_MvMl: return 3; +case VE::pvfmkwgtnan_Mvl: return 2; +case VE::pvfmkwgtnan_MvMl: return 3; +case VE::pvfmkwltnan_Mvl: return 2; +case VE::pvfmkwltnan_MvMl: return 3; +case VE::pvfmkwnenan_Mvl: return 2; +case VE::pvfmkwnenan_MvMl: return 3; +case VE::pvfmkweqnan_Mvl: return 2; +case VE::pvfmkweqnan_MvMl: return 3; +case VE::pvfmkwgenan_Mvl: return 2; +case VE::pvfmkwgenan_MvMl: return 3; +case VE::pvfmkwlenan_Mvl: return 2; +case VE::pvfmkwlenan_MvMl: return 3; +case VE::vfmkdgt_mvl: return 2; +case VE::vfmkdgt_mvml: return 3; +case VE::vfmkdlt_mvl: return 2; +case VE::vfmkdlt_mvml: return 3; +case VE::vfmkdne_mvl: return 2; +case VE::vfmkdne_mvml: return 3; +case VE::vfmkdeq_mvl: return 2; +case VE::vfmkdeq_mvml: return 3; +case VE::vfmkdge_mvl: return 2; +case VE::vfmkdge_mvml: return 3; +case VE::vfmkdle_mvl: return 2; +case VE::vfmkdle_mvml: return 3; +case VE::vfmkdnum_mvl: return 2; +case VE::vfmkdnum_mvml: return 3; +case VE::vfmkdnan_mvl: return 2; +case VE::vfmkdnan_mvml: return 3; +case VE::vfmkdgtnan_mvl: return 2; +case VE::vfmkdgtnan_mvml: return 3; +case VE::vfmkdltnan_mvl: return 2; +case VE::vfmkdltnan_mvml: return 3; +case VE::vfmkdnenan_mvl: return 2; +case VE::vfmkdnenan_mvml: return 3; +case VE::vfmkdeqnan_mvl: return 2; +case VE::vfmkdeqnan_mvml: return 3; +case VE::vfmkdgenan_mvl: return 2; +case VE::vfmkdgenan_mvml: return 3; +case VE::vfmkdlenan_mvl: return 2; +case VE::vfmkdlenan_mvml: return 3; +case VE::vfmksgt_mvl: return 2; +case VE::vfmksgt_mvml: return 3; +case VE::vfmkslt_mvl: return 2; +case VE::vfmkslt_mvml: return 3; +case VE::vfmksne_mvl: return 2; +case VE::vfmksne_mvml: return 3; +case VE::vfmkseq_mvl: return 2; +case VE::vfmkseq_mvml: return 3; +case VE::vfmksge_mvl: return 2; +case VE::vfmksge_mvml: return 3; +case VE::vfmksle_mvl: return 2; +case VE::vfmksle_mvml: return 3; +case VE::vfmksnum_mvl: return 2; +case VE::vfmksnum_mvml: return 3; +case VE::vfmksnan_mvl: return 2; +case VE::vfmksnan_mvml: return 3; +case VE::vfmksgtnan_mvl: return 2; +case VE::vfmksgtnan_mvml: return 3; +case VE::vfmksltnan_mvl: return 2; +case VE::vfmksltnan_mvml: return 3; +case VE::vfmksnenan_mvl: return 2; +case VE::vfmksnenan_mvml: return 3; +case VE::vfmkseqnan_mvl: return 2; +case VE::vfmkseqnan_mvml: return 3; +case VE::vfmksgenan_mvl: return 2; +case VE::vfmksgenan_mvml: return 3; +case VE::vfmkslenan_mvl: return 2; +case VE::vfmkslenan_mvml: return 3; +case VE::pvfmkslogt_mvl: return 2; +case VE::pvfmksupgt_mvl: return 2; +case VE::pvfmkslogt_mvml: return 3; +case VE::pvfmksupgt_mvml: return 3; +case VE::pvfmkslolt_mvl: return 2; +case VE::pvfmksuplt_mvl: return 2; +case VE::pvfmkslolt_mvml: return 3; +case VE::pvfmksuplt_mvml: return 3; +case VE::pvfmkslone_mvl: return 2; +case VE::pvfmksupne_mvl: return 2; +case VE::pvfmkslone_mvml: return 3; +case VE::pvfmksupne_mvml: return 3; +case VE::pvfmksloeq_mvl: return 2; +case VE::pvfmksupeq_mvl: return 2; +case VE::pvfmksloeq_mvml: return 3; +case VE::pvfmksupeq_mvml: return 3; +case VE::pvfmksloge_mvl: return 2; +case VE::pvfmksupge_mvl: return 2; +case VE::pvfmksloge_mvml: return 3; +case VE::pvfmksupge_mvml: return 3; +case VE::pvfmkslole_mvl: return 2; +case VE::pvfmksuple_mvl: return 2; +case VE::pvfmkslole_mvml: return 3; +case VE::pvfmksuple_mvml: return 3; +case VE::pvfmkslonum_mvl: return 2; +case VE::pvfmksupnum_mvl: return 2; +case VE::pvfmkslonum_mvml: return 3; +case VE::pvfmksupnum_mvml: return 3; +case 
VE::pvfmkslonan_mvl: return 2; +case VE::pvfmksupnan_mvl: return 2; +case VE::pvfmkslonan_mvml: return 3; +case VE::pvfmksupnan_mvml: return 3; +case VE::pvfmkslogtnan_mvl: return 2; +case VE::pvfmksupgtnan_mvl: return 2; +case VE::pvfmkslogtnan_mvml: return 3; +case VE::pvfmksupgtnan_mvml: return 3; +case VE::pvfmksloltnan_mvl: return 2; +case VE::pvfmksupltnan_mvl: return 2; +case VE::pvfmksloltnan_mvml: return 3; +case VE::pvfmksupltnan_mvml: return 3; +case VE::pvfmkslonenan_mvl: return 2; +case VE::pvfmksupnenan_mvl: return 2; +case VE::pvfmkslonenan_mvml: return 3; +case VE::pvfmksupnenan_mvml: return 3; +case VE::pvfmksloeqnan_mvl: return 2; +case VE::pvfmksupeqnan_mvl: return 2; +case VE::pvfmksloeqnan_mvml: return 3; +case VE::pvfmksupeqnan_mvml: return 3; +case VE::pvfmkslogenan_mvl: return 2; +case VE::pvfmksupgenan_mvl: return 2; +case VE::pvfmkslogenan_mvml: return 3; +case VE::pvfmksupgenan_mvml: return 3; +case VE::pvfmkslolenan_mvl: return 2; +case VE::pvfmksuplenan_mvl: return 2; +case VE::pvfmkslolenan_mvml: return 3; +case VE::pvfmksuplenan_mvml: return 3; +case VE::pvfmksgt_Mvl: return 2; +case VE::pvfmksgt_MvMl: return 3; +case VE::pvfmkslt_Mvl: return 2; +case VE::pvfmkslt_MvMl: return 3; +case VE::pvfmksne_Mvl: return 2; +case VE::pvfmksne_MvMl: return 3; +case VE::pvfmkseq_Mvl: return 2; +case VE::pvfmkseq_MvMl: return 3; +case VE::pvfmksge_Mvl: return 2; +case VE::pvfmksge_MvMl: return 3; +case VE::pvfmksle_Mvl: return 2; +case VE::pvfmksle_MvMl: return 3; +case VE::pvfmksnum_Mvl: return 2; +case VE::pvfmksnum_MvMl: return 3; +case VE::pvfmksnan_Mvl: return 2; +case VE::pvfmksnan_MvMl: return 3; +case VE::pvfmksgtnan_Mvl: return 2; +case VE::pvfmksgtnan_MvMl: return 3; +case VE::pvfmksltnan_Mvl: return 2; +case VE::pvfmksltnan_MvMl: return 3; +case VE::pvfmksnenan_Mvl: return 2; +case VE::pvfmksnenan_MvMl: return 3; +case VE::pvfmkseqnan_Mvl: return 2; +case VE::pvfmkseqnan_MvMl: return 3; +case VE::pvfmksgenan_Mvl: return 2; +case VE::pvfmksgenan_MvMl: return 3; +case VE::pvfmkslenan_Mvl: return 2; +case VE::pvfmkslenan_MvMl: return 3; +case VE::vsumwsx_vvl: return 2; +case VE::vsumwsx_vvml: return 3; +case VE::vsumwzx_vvl: return 2; +case VE::vsumwzx_vvml: return 3; +case VE::vsuml_vvl: return 2; +case VE::vsuml_vvml: return 3; +case VE::vfsumd_vvl: return 2; +case VE::vfsumd_vvml: return 3; +case VE::vfsums_vvl: return 2; +case VE::vfsums_vvml: return 3; +case VE::vrmaxswfstsx_vvl: return 2; +case VE::vrmaxswfstsx_vvvl: return 3; +case VE::vrmaxswlstsx_vvl: return 2; +case VE::vrmaxswlstsx_vvvl: return 3; +case VE::vrmaxswfstzx_vvl: return 2; +case VE::vrmaxswfstzx_vvvl: return 3; +case VE::vrmaxswlstzx_vvl: return 2; +case VE::vrmaxswlstzx_vvvl: return 3; +case VE::vrminswfstsx_vvl: return 2; +case VE::vrminswfstsx_vvvl: return 3; +case VE::vrminswlstsx_vvl: return 2; +case VE::vrminswlstsx_vvvl: return 3; +case VE::vrminswfstzx_vvl: return 2; +case VE::vrminswfstzx_vvvl: return 3; +case VE::vrminswlstzx_vvl: return 2; +case VE::vrminswlstzx_vvvl: return 3; +case VE::vrmaxslfst_vvl: return 2; +case VE::vrmaxslfst_vvvl: return 3; +case VE::vrmaxsllst_vvl: return 2; +case VE::vrmaxsllst_vvvl: return 3; +case VE::vrminslfst_vvl: return 2; +case VE::vrminslfst_vvvl: return 3; +case VE::vrminsllst_vvl: return 2; +case VE::vrminsllst_vvvl: return 3; +case VE::vfrmaxdfst_vvl: return 2; +case VE::vfrmaxdfst_vvvl: return 3; +case VE::vfrmaxdlst_vvl: return 2; +case VE::vfrmaxdlst_vvvl: return 3; +case VE::vfrmaxsfst_vvl: return 2; +case VE::vfrmaxsfst_vvvl: return 3; 
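// Editorial aside (not part of the patch): in these generated pseudo names the
// suffix after the underscore appears to spell out the operand kinds -- 'v' a
// vector register, 's' a scalar register, 'I' an immediate, 'm' a 256-bit mask,
// 'M' a 512-bit mask pair -- with a trailing 'l' for the vector length (VL).
// The integer returned for each opcode then looks like the MachineInstr operand
// index of that trailing VL operand, counting the result (where there is one)
// as operand 0: vmuluw_vvvl -> 3, vsfa_vvssl -> 4, vfmadd_vvvvmvl -> 6 above.
// A minimal sketch of how such a table might be consumed, assuming a
// hypothetical wrapper getVLOperandIndex() around this switch:
//
//   // Fetch the vector-length operand of a VE vector MachineInstr.
//   unsigned Idx = getVLOperandIndex(MI.getOpcode()); // e.g. vmuluw_vvvl -> 3
//   const MachineOperand &VL = MI.getOperand(Idx);    // register or immediate VL
//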
+case VE::vfrmaxslst_vvl: return 2; +case VE::vfrmaxslst_vvvl: return 3; +case VE::vfrmindfst_vvl: return 2; +case VE::vfrmindfst_vvvl: return 3; +case VE::vfrmindlst_vvl: return 2; +case VE::vfrmindlst_vvvl: return 3; +case VE::vfrminsfst_vvl: return 2; +case VE::vfrminsfst_vvvl: return 3; +case VE::vfrminslst_vvl: return 2; +case VE::vfrminslst_vvvl: return 3; +case VE::vrand_vvl: return 2; +case VE::vrand_vvml: return 3; +case VE::vror_vvl: return 2; +case VE::vror_vvml: return 3; +case VE::vrxor_vvl: return 2; +case VE::vrxor_vvml: return 3; +case VE::vgt_vvssl: return 4; +case VE::vgt_vvssvl: return 5; +case VE::vgt_vvsZl: return 4; +case VE::vgt_vvsZvl: return 5; +case VE::vgt_vvIsl: return 4; +case VE::vgt_vvIsvl: return 5; +case VE::vgt_vvIZl: return 4; +case VE::vgt_vvIZvl: return 5; +case VE::vgt_vvssml: return 5; +case VE::vgt_vvssmvl: return 6; +case VE::vgt_vvsZml: return 5; +case VE::vgt_vvsZmvl: return 6; +case VE::vgt_vvIsml: return 5; +case VE::vgt_vvIsmvl: return 6; +case VE::vgt_vvIZml: return 5; +case VE::vgt_vvIZmvl: return 6; +case VE::vgtnc_vvssl: return 4; +case VE::vgtnc_vvssvl: return 5; +case VE::vgtnc_vvsZl: return 4; +case VE::vgtnc_vvsZvl: return 5; +case VE::vgtnc_vvIsl: return 4; +case VE::vgtnc_vvIsvl: return 5; +case VE::vgtnc_vvIZl: return 4; +case VE::vgtnc_vvIZvl: return 5; +case VE::vgtnc_vvssml: return 5; +case VE::vgtnc_vvssmvl: return 6; +case VE::vgtnc_vvsZml: return 5; +case VE::vgtnc_vvsZmvl: return 6; +case VE::vgtnc_vvIsml: return 5; +case VE::vgtnc_vvIsmvl: return 6; +case VE::vgtnc_vvIZml: return 5; +case VE::vgtnc_vvIZmvl: return 6; +case VE::vgtu_vvssl: return 4; +case VE::vgtu_vvssvl: return 5; +case VE::vgtu_vvsZl: return 4; +case VE::vgtu_vvsZvl: return 5; +case VE::vgtu_vvIsl: return 4; +case VE::vgtu_vvIsvl: return 5; +case VE::vgtu_vvIZl: return 4; +case VE::vgtu_vvIZvl: return 5; +case VE::vgtu_vvssml: return 5; +case VE::vgtu_vvssmvl: return 6; +case VE::vgtu_vvsZml: return 5; +case VE::vgtu_vvsZmvl: return 6; +case VE::vgtu_vvIsml: return 5; +case VE::vgtu_vvIsmvl: return 6; +case VE::vgtu_vvIZml: return 5; +case VE::vgtu_vvIZmvl: return 6; +case VE::vgtunc_vvssl: return 4; +case VE::vgtunc_vvssvl: return 5; +case VE::vgtunc_vvsZl: return 4; +case VE::vgtunc_vvsZvl: return 5; +case VE::vgtunc_vvIsl: return 4; +case VE::vgtunc_vvIsvl: return 5; +case VE::vgtunc_vvIZl: return 4; +case VE::vgtunc_vvIZvl: return 5; +case VE::vgtunc_vvssml: return 5; +case VE::vgtunc_vvssmvl: return 6; +case VE::vgtunc_vvsZml: return 5; +case VE::vgtunc_vvsZmvl: return 6; +case VE::vgtunc_vvIsml: return 5; +case VE::vgtunc_vvIsmvl: return 6; +case VE::vgtunc_vvIZml: return 5; +case VE::vgtunc_vvIZmvl: return 6; +case VE::vgtlsx_vvssl: return 4; +case VE::vgtlsx_vvssvl: return 5; +case VE::vgtlsx_vvsZl: return 4; +case VE::vgtlsx_vvsZvl: return 5; +case VE::vgtlsx_vvIsl: return 4; +case VE::vgtlsx_vvIsvl: return 5; +case VE::vgtlsx_vvIZl: return 4; +case VE::vgtlsx_vvIZvl: return 5; +case VE::vgtlsx_vvssml: return 5; +case VE::vgtlsx_vvssmvl: return 6; +case VE::vgtlsx_vvsZml: return 5; +case VE::vgtlsx_vvsZmvl: return 6; +case VE::vgtlsx_vvIsml: return 5; +case VE::vgtlsx_vvIsmvl: return 6; +case VE::vgtlsx_vvIZml: return 5; +case VE::vgtlsx_vvIZmvl: return 6; +case VE::vgtlsxnc_vvssl: return 4; +case VE::vgtlsxnc_vvssvl: return 5; +case VE::vgtlsxnc_vvsZl: return 4; +case VE::vgtlsxnc_vvsZvl: return 5; +case VE::vgtlsxnc_vvIsl: return 4; +case VE::vgtlsxnc_vvIsvl: return 5; +case VE::vgtlsxnc_vvIZl: return 4; +case VE::vgtlsxnc_vvIZvl: return 5; +case 
VE::vgtlsxnc_vvssml: return 5; +case VE::vgtlsxnc_vvssmvl: return 6; +case VE::vgtlsxnc_vvsZml: return 5; +case VE::vgtlsxnc_vvsZmvl: return 6; +case VE::vgtlsxnc_vvIsml: return 5; +case VE::vgtlsxnc_vvIsmvl: return 6; +case VE::vgtlsxnc_vvIZml: return 5; +case VE::vgtlsxnc_vvIZmvl: return 6; +case VE::vgtlzx_vvssl: return 4; +case VE::vgtlzx_vvssvl: return 5; +case VE::vgtlzx_vvsZl: return 4; +case VE::vgtlzx_vvsZvl: return 5; +case VE::vgtlzx_vvIsl: return 4; +case VE::vgtlzx_vvIsvl: return 5; +case VE::vgtlzx_vvIZl: return 4; +case VE::vgtlzx_vvIZvl: return 5; +case VE::vgtlzx_vvssml: return 5; +case VE::vgtlzx_vvssmvl: return 6; +case VE::vgtlzx_vvsZml: return 5; +case VE::vgtlzx_vvsZmvl: return 6; +case VE::vgtlzx_vvIsml: return 5; +case VE::vgtlzx_vvIsmvl: return 6; +case VE::vgtlzx_vvIZml: return 5; +case VE::vgtlzx_vvIZmvl: return 6; +case VE::vgtlzxnc_vvssl: return 4; +case VE::vgtlzxnc_vvssvl: return 5; +case VE::vgtlzxnc_vvsZl: return 4; +case VE::vgtlzxnc_vvsZvl: return 5; +case VE::vgtlzxnc_vvIsl: return 4; +case VE::vgtlzxnc_vvIsvl: return 5; +case VE::vgtlzxnc_vvIZl: return 4; +case VE::vgtlzxnc_vvIZvl: return 5; +case VE::vgtlzxnc_vvssml: return 5; +case VE::vgtlzxnc_vvssmvl: return 6; +case VE::vgtlzxnc_vvsZml: return 5; +case VE::vgtlzxnc_vvsZmvl: return 6; +case VE::vgtlzxnc_vvIsml: return 5; +case VE::vgtlzxnc_vvIsmvl: return 6; +case VE::vgtlzxnc_vvIZml: return 5; +case VE::vgtlzxnc_vvIZmvl: return 6; +case VE::vsc_vvssl: return 4; +case VE::vsc_vvsZl: return 4; +case VE::vsc_vvIsl: return 4; +case VE::vsc_vvIZl: return 4; +case VE::vsc_vvssml: return 5; +case VE::vsc_vvsZml: return 5; +case VE::vsc_vvIsml: return 5; +case VE::vsc_vvIZml: return 5; +case VE::vscnc_vvssl: return 4; +case VE::vscnc_vvsZl: return 4; +case VE::vscnc_vvIsl: return 4; +case VE::vscnc_vvIZl: return 4; +case VE::vscnc_vvssml: return 5; +case VE::vscnc_vvsZml: return 5; +case VE::vscnc_vvIsml: return 5; +case VE::vscnc_vvIZml: return 5; +case VE::vscot_vvssl: return 4; +case VE::vscot_vvsZl: return 4; +case VE::vscot_vvIsl: return 4; +case VE::vscot_vvIZl: return 4; +case VE::vscot_vvssml: return 5; +case VE::vscot_vvsZml: return 5; +case VE::vscot_vvIsml: return 5; +case VE::vscot_vvIZml: return 5; +case VE::vscncot_vvssl: return 4; +case VE::vscncot_vvsZl: return 4; +case VE::vscncot_vvIsl: return 4; +case VE::vscncot_vvIZl: return 4; +case VE::vscncot_vvssml: return 5; +case VE::vscncot_vvsZml: return 5; +case VE::vscncot_vvIsml: return 5; +case VE::vscncot_vvIZml: return 5; +case VE::vscu_vvssl: return 4; +case VE::vscu_vvsZl: return 4; +case VE::vscu_vvIsl: return 4; +case VE::vscu_vvIZl: return 4; +case VE::vscu_vvssml: return 5; +case VE::vscu_vvsZml: return 5; +case VE::vscu_vvIsml: return 5; +case VE::vscu_vvIZml: return 5; +case VE::vscunc_vvssl: return 4; +case VE::vscunc_vvsZl: return 4; +case VE::vscunc_vvIsl: return 4; +case VE::vscunc_vvIZl: return 4; +case VE::vscunc_vvssml: return 5; +case VE::vscunc_vvsZml: return 5; +case VE::vscunc_vvIsml: return 5; +case VE::vscunc_vvIZml: return 5; +case VE::vscuot_vvssl: return 4; +case VE::vscuot_vvsZl: return 4; +case VE::vscuot_vvIsl: return 4; +case VE::vscuot_vvIZl: return 4; +case VE::vscuot_vvssml: return 5; +case VE::vscuot_vvsZml: return 5; +case VE::vscuot_vvIsml: return 5; +case VE::vscuot_vvIZml: return 5; +case VE::vscuncot_vvssl: return 4; +case VE::vscuncot_vvsZl: return 4; +case VE::vscuncot_vvIsl: return 4; +case VE::vscuncot_vvIZl: return 4; +case VE::vscuncot_vvssml: return 5; +case VE::vscuncot_vvsZml: return 5; +case 
VE::vscuncot_vvIsml: return 5; +case VE::vscuncot_vvIZml: return 5; +case VE::vscl_vvssl: return 4; +case VE::vscl_vvsZl: return 4; +case VE::vscl_vvIsl: return 4; +case VE::vscl_vvIZl: return 4; +case VE::vscl_vvssml: return 5; +case VE::vscl_vvsZml: return 5; +case VE::vscl_vvIsml: return 5; +case VE::vscl_vvIZml: return 5; +case VE::vsclnc_vvssl: return 4; +case VE::vsclnc_vvsZl: return 4; +case VE::vsclnc_vvIsl: return 4; +case VE::vsclnc_vvIZl: return 4; +case VE::vsclnc_vvssml: return 5; +case VE::vsclnc_vvsZml: return 5; +case VE::vsclnc_vvIsml: return 5; +case VE::vsclnc_vvIZml: return 5; +case VE::vsclot_vvssl: return 4; +case VE::vsclot_vvsZl: return 4; +case VE::vsclot_vvIsl: return 4; +case VE::vsclot_vvIZl: return 4; +case VE::vsclot_vvssml: return 5; +case VE::vsclot_vvsZml: return 5; +case VE::vsclot_vvIsml: return 5; +case VE::vsclot_vvIZml: return 5; +case VE::vsclncot_vvssl: return 4; +case VE::vsclncot_vvsZl: return 4; +case VE::vsclncot_vvIsl: return 4; +case VE::vsclncot_vvIZl: return 4; +case VE::vsclncot_vvssml: return 5; +case VE::vsclncot_vvsZml: return 5; +case VE::vsclncot_vvIsml: return 5; +case VE::vsclncot_vvIZml: return 5; +case VE::pcvm_sml: return 2; +case VE::lzvm_sml: return 2; +case VE::tovm_sml: return 2; diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -4982,8 +4982,11 @@ LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister << " bits.\n"); +#if 0 assert(MaxVectorSize <= 256 && "Did not expect to pack so many elements" " into one vector!"); +#endif + if (MaxVectorSize == 0) { LLVM_DEBUG(dbgs() << "LV: The target has no vector registers.\n"); MaxVectorSize = 1; diff --git a/llvm/test/CodeGen/VE/add.ll b/llvm/test/CodeGen/VE/add.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/add.ll @@ -0,0 +1,9 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +define i32 @sample_add(i32, i32) { +; CHECK-LABEL: sample_add: +; CHECK: .LBB0_2: +; CHECK-NEXT: adds.w.sx %s0, %s1, %s0 + %3 = add nsw i32 %1, %0 + ret i32 %3 +} diff --git a/llvm/test/CodeGen/VE/addition.ll b/llvm/test/CodeGen/VE/addition.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/addition.ll @@ -0,0 +1,262 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +define signext i8 @func1(i8 signext, i8 signext) { +; CHECK-LABEL: func1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.w.sx %s34, %s1, %s0 + %3 = add i8 %1, %0 + ret i8 %3 +} + +define signext i16 @func2(i16 signext, i16 signext) { +; CHECK-LABEL: func2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.w.sx %s34, %s1, %s0 + %3 = add i16 %1, %0 + ret i16 %3 +} + +define i32 @func3(i32, i32) { +; CHECK-LABEL: func3: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.w.sx %s0, %s1, %s0 + %3 = add nsw i32 %1, %0 + ret i32 %3 +} + +define i64 @func4(i64, i64) { +; CHECK-LABEL: func4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.l %s0, %s1, %s0 + %3 = add nsw i64 %1, %0 + ret i64 %3 +} + +define i128 @func5(i128, i128) { +; CHECK-LABEL: func5: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.l %s34, %s3, %s1 +; CHECK-NEXT: adds.l %s0, %s2, %s0 +; CHECK-NEXT: cmpu.l %s35, %s0, %s2 +; CHECK-NEXT: or %s36, 0, (0)1 +; CHECK-NEXT: cmov.l.lt %s36, (63)0, %s35 +; CHECK-NEXT: adds.w.zx %s35, %s36, (0)1 +; CHECK-NEXT: adds.l %s1, %s34, %s35 + %3 = add nsw i128 %1, %0 + ret i128 %3 +} + +define zeroext i8 
@func6(i8 zeroext, i8 zeroext) { +; CHECK-LABEL: func6: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.w.sx %s34, %s1, %s0 + %3 = add i8 %1, %0 + ret i8 %3 +} + +define zeroext i16 @func7(i16 zeroext, i16 zeroext) { +; CHECK-LABEL: func7: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.w.sx %s34, %s1, %s0 + %3 = add i16 %1, %0 + ret i16 %3 +} + +define i32 @func8(i32, i32) { +; CHECK-LABEL: func8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.w.sx %s0, %s1, %s0 + %3 = add i32 %1, %0 + ret i32 %3 +} + +define i64 @func9(i64, i64) { +; CHECK-LABEL: func9: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.l %s0, %s1, %s0 + %3 = add i64 %1, %0 + ret i64 %3 +} + +define i128 @func10(i128, i128) { +; CHECK-LABEL: func10: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: adds.l %s34, %s3, %s1 +; CHECK-NEXT: adds.l %s0, %s2, %s0 +; CHECK-NEXT: cmpu.l %s35, %s0, %s2 +; CHECK-NEXT: or %s36, 0, (0)1 +; CHECK-NEXT: cmov.l.lt %s36, (63)0, %s35 +; CHECK-NEXT: adds.w.zx %s35, %s36, (0)1 +; CHECK-NEXT: adds.l %s1, %s34, %s35 + %3 = add i128 %1, %0 + ret i128 %3 +} + +define float @func11(float, float) { +; CHECK-LABEL: func11: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fadd.s %s0, %s0, %s1 + %3 = fadd float %0, %1 + ret float %3 +} + +define double @func12(double, double) { +; CHECK-LABEL: func12: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fadd.d %s0, %s0, %s1 + %3 = fadd double %0, %1 + ret double %3 +} + +define signext i8 @func13(i8 signext) { +; CHECK-LABEL: func13: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, 5(%s0) +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 + %2 = add i8 %0, 5 + ret i8 %2 +} + +define signext i16 @func14(i16 signext) { +; CHECK-LABEL: func14: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, 5(%s0) +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 + %2 = add i16 %0, 5 + ret i16 %2 +} + +define i32 @func15(i32) { +; CHECK-LABEL: func15: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s0, 5(%s0) + %2 = add nsw i32 %0, 5 + ret i32 %2 +} + +define i64 @func16(i64) { +; CHECK-LABEL: func16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s0, 5(%s0) + %2 = add nsw i64 %0, 5 + ret i64 %2 +} + +define i128 @func17(i128) { +; CHECK-LABEL: func17: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, 5(%s0) +; CHECK-NEXT: cmpu.l %s35, %s34, %s0 +; CHECK-NEXT: or %s36, 0, (0)1 +; CHECK-NEXT: cmov.l.lt %s36, (63)0, %s35 +; CHECK-NEXT: adds.w.zx %s35, %s36, (0)1 +; CHECK-NEXT: adds.l %s1, %s1, %s35 +; CHECK-NEXT: or %s0, 0, %s34 + %2 = add nsw i128 %0, 5 + ret i128 %2 +} + +define zeroext i8 @func18(i8 zeroext) { +; CHECK-LABEL: func18: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, 5(%s0) +; CHECK-NEXT: and %s0, %s34, (56)0 + %2 = add i8 %0, 5 + ret i8 %2 +} + +define zeroext i16 @func19(i16 zeroext) { +; CHECK-LABEL: func19: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, 5(%s0) +; CHECK-NEXT: and %s0, %s34, (48)0 + %2 = add i16 %0, 5 + ret i16 %2 +} + +define i32 @func20(i32) { +; CHECK-LABEL: func20: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s0, 5(%s0) + %2 = add i32 %0, 5 + ret i32 %2 +} + +define i64 @func21(i64) { +; CHECK-LABEL: func21: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s0, 5(%s0) + %2 = add i64 %0, 5 + ret i64 %2 +} + +define i128 @func22(i128) { +; CHECK-LABEL: func22: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, 5(%s0) +; CHECK-NEXT: cmpu.l %s35, %s34, %s0 +; CHECK-NEXT: or %s36, 0, (0)1 +; CHECK-NEXT: cmov.l.lt %s36, (63)0, %s35 +; CHECK-NEXT: adds.w.zx %s35, %s36, (0)1 +; 
CHECK-NEXT: adds.l %s1, %s1, %s35 +; CHECK-NEXT: or %s0, 0, %s34 + %2 = add i128 %0, 5 + ret i128 %2 +} + +define float @func23(float) { +; CHECK-LABEL: func23: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea.sl %s34, 1084227584 +; CHECK-NEXT: or %s34, 0, %s34 +; CHECK-NEXT: fadd.s %s0, %s0, %s34 + %2 = fadd float %0, 5.000000e+00 + ret float %2 +} + +define double @func24(double) { +; CHECK-LABEL: func24: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea.sl %s34, 1075052544 +; CHECK-NEXT: fadd.d %s0, %s0, %s34 + %2 = fadd double %0, 5.000000e+00 + ret double %2 +} + +define i32 @func25(i32) { +; CHECK-LABEL: func25: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, -2147483648 +; CHECK-NEXT: xor %s0, %s0, %s34 + %2 = xor i32 %0, -2147483648 + ret i32 %2 +} + +define i64 @func26(i64) { +; CHECK-LABEL: func26: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, -2147483648 +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: adds.l %s0, %s0, %s34 + %2 = add nsw i64 %0, 2147483648 + ret i64 %2 +} + +define i128 @func27(i128) { +; CHECK-LABEL: func27: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, -2147483648 +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: adds.l %s34, %s0, %s34 +; CHECK-NEXT: cmpu.l %s35, %s34, %s0 +; CHECK-NEXT: or %s36, 0, (0)1 +; CHECK-NEXT: cmov.l.lt %s36, (63)0, %s35 +; CHECK-NEXT: adds.w.zx %s35, %s36, (0)1 +; CHECK-NEXT: adds.l %s1, %s1, %s35 +; CHECK-NEXT: or %s0, 0, %s34 + %2 = add nsw i128 %0, 2147483648 + ret i128 %2 +} + diff --git a/llvm/test/CodeGen/VE/alloca.ll b/llvm/test/CodeGen/VE/alloca.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/alloca.ll @@ -0,0 +1,47 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +@buf = external global i8*, align 8 + +; Function Attrs: nounwind +define void @test(i32) { +; CHECK-LABEL: test: +; CHECK: .LBB0_2: +; CHECK-NEXT: adds.w.sx %s2, %s0, (0)1 +; CHECK-NEXT: lea %s34, 15(%s2) +; CHECK-NEXT: and %s0, -16, %s34 +; CHECK-NEXT: adds.l %s11, -64, %s11 +; CHECK-NEXT: lea %s34, __llvm_grow_stack@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __llvm_grow_stack@hi(%s34) +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: lea %s13, 64 +; CHECK-NEXT: and %s13, %s13, (32)0 +; CHECK-NEXT: lea.sl %s11, 0(%s11, %s13) +; CHECK-NEXT: lea %s1, 176(%s11) +; CHECK-NEXT: lea %s34, buf@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, buf@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: adds.l %s11, -64, %s11 +; CHECK-NEXT: lea %s34, memcpy@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, memcpy@hi(%s34) +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: lea %s13, 64 +; CHECK-NEXT: and %s13, %s13, (32)0 +; CHECK-NEXT: lea.sl %s11, 0(%s11, %s13) +; CHECK-NEXT: or %s11, 0, %s9 + %2 = sext i32 %0 to i64 + %3 = alloca i8, i64 %2, align 8 + %4 = load i8*, i8** @buf, align 8, !tbaa !2 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %4, i8* nonnull align 8 %3, i64 %2, i1 false) + ret void +} + +; Function Attrs: argmemonly nounwind +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) + +!2 = !{!3, !3, i64 0} +!3 = !{!"any pointer", !4, i64 0} +!4 = !{!"omnipotent char", !5, i64 0} +!5 = !{!"Simple C/C++ TBAA"} diff --git a/llvm/test/CodeGen/VE/atomic.ll b/llvm/test/CodeGen/VE/atomic.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/atomic.ll @@ -0,0 +1,1869 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +@c = common global i8 0, align 32 +@s = common global i16 0, align 32 
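; Editorial aside (not part of the patch): the tests in this file, as checked
; below, document the VE atomic lowering exercised by this change. A release
; store is preceded by "fencem 1" and a seq_cst store is bracketed by "fencem 3";
; an acquire load is followed by "fencem 2" and a seq_cst load by "fencem 3".
; Whole-register exchange uses ts1am.w/ts1am.l, i8/i16 exchange and
; compare-exchange fall back to a cas.w loop on the containing 32-bit word, and
; i128 operations call the __atomic_* runtime helpers. For example, the first
; test below stores a constant with release ordering:
;
;   store atomic i8 12, i8* @c release, align 32
;   ; expected lowering: fencem 1, address formed with lea/lea.sl, then st1b
;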
+@i = common global i32 0, align 32 +@l = common global i64 0, align 32 +@it= common global i128 0, align 32 +@ui = common global i32 0, align 32 + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_1() { +; CHECK-LABEL: test_atomic_store_1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 12, i8* @c release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_1seq() { +; CHECK-LABEL: test_atomic_store_1seq: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 12, i8* @c seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_2() { +; CHECK-LABEL: test_atomic_store_2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st2b %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i16 12, i16* @s release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_2seq() { +; CHECK-LABEL: test_atomic_store_2seq: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st2b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i16 12, i16* @s seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_4() { +; CHECK-LABEL: test_atomic_store_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: stl %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i32 12, i32* @i release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_4cst() { +; CHECK-LABEL: test_atomic_store_4cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: stl %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i32 12, i32* @i seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_8() { +; CHECK-LABEL: test_atomic_store_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i64 12, i64* @l release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_8cst() { +; CHECK-LABEL: test_atomic_store_8cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, 
%s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i64 12, i64* @l seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_16() { +; CHECK-LABEL: test_atomic_store_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_store_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_store_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 12, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 3, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i128 12, i128* @it release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_16cst() { +; CHECK-LABEL: test_atomic_store_16cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_store_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_store_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 12, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i128 12, i128* @it seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_load_1() { +; CHECK-LABEL: test_atomic_load_1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: ld1b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i8, i8* @c acquire, align 32 + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_load_1cst() { +; CHECK-LABEL: test_atomic_load_1cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: ld1b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i8, i8* @c seq_cst, align 32 + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_load_2() { +; CHECK-LABEL: test_atomic_load_2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: ld2b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i16, i16* @s acquire, align 32 + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_load_2cst() { +; CHECK-LABEL: test_atomic_load_2cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: ld2b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i16, i16* @s seq_cst, align 32 + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_load_4() 
{ +; CHECK-LABEL: test_atomic_load_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.zx %s0, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i32, i32* @i acquire, align 32 + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_load_4cst() { +; CHECK-LABEL: test_atomic_load_4cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.zx %s0, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i32, i32* @i seq_cst, align 32 + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_load_8() { +; CHECK-LABEL: test_atomic_load_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i64, i64* @l acquire, align 32 + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_load_8cst() { +; CHECK-LABEL: test_atomic_load_8cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i64, i64* @l seq_cst, align 32 + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_load_16() { +; CHECK-LABEL: test_atomic_load_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_load_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_load_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 2, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i128, i128* @it acquire, align 32 + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_load_16cst() { +; CHECK-LABEL: test_atomic_load_16cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_load_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_load_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i128, i128* @it seq_cst, align 32 + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_exchange_1() { +; CHECK-LABEL: test_atomic_exchange_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s35, (,%s34) +; CHECK-NEXT: lea %s36, -256 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s35 +; CHECK-NEXT: and %s35, %s35, %s36 +; CHECK-NEXT: or %s35, 10, %s35 +; CHECK-NEXT: cas.w %s35, (%s34), %s37 +; CHECK-NEXT: brne.w %s35, %s37, .LBB20_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s35, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = 
atomicrmw xchg i8* @c, i8 10 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_exchange_2() { +; CHECK-LABEL: test_atomic_exchange_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s35, (,%s34) +; CHECK-NEXT: lea %s36, -65536 +; CHECK-NEXT: lea %s37, 28672 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, 0, %s35 +; CHECK-NEXT: and %s35, %s35, %s36 +; CHECK-NEXT: or %s35, %s35, %s37 +; CHECK-NEXT: cas.w %s35, (%s34), %s38 +; CHECK-NEXT: brne.w %s35, %s38, .LBB21_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s35, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i16* @s, i16 28672 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_exchange_4() { +; CHECK-LABEL: test_atomic_exchange_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: lea %s0, 1886417008 +; CHECK-NEXT: ts1am.w %s0, (%s34), 15 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i32* @i, i32 1886417008 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_exchange_8() { +; CHECK-LABEL: test_atomic_exchange_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: lea %s35, 1886417008 +; CHECK-NEXT: lea.sl %s0, 1886417008(%s35) +; CHECK-NEXT: ts1am.l %s0, (%s34), 127 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i64* @l, i64 8102099357864587376 acquire + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_exchange_16() { +; CHECK-LABEL: test_atomic_exchange_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_exchange_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_exchange_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: lea %s34, 1886417008 +; CHECK-NEXT: lea.sl %s1, 1886417008(%s34) +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 2, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i128* @it, i128 8102099357864587376 acquire + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_compare_exchange_1(i8, i8) { +; CHECK-LABEL: test_atomic_compare_exchange_1: +; CHECK: .LBB{{[0-9]+}}_5: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s34) +; CHECK-NEXT: and %s35, %s1, (56)0 +; CHECK-NEXT: and %s36, %s0, (56)0 +; CHECK-NEXT: lea %s37, -256 +; CHECK-NEXT: and %s41, %s38, %s37 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, %s41, %s35 +; CHECK-NEXT: or %s39, %s41, %s36 +; CHECK-NEXT: cas.w %s38, (%s34), %s39 +; CHECK-NEXT: breq.w %s38, %s39, .LBB{{[0-9]+}}_3 +; CHECK-NEXT: # %partword.cmpxchg.failure +; CHECK-NEXT: 
# in Loop: Header=BB25_1 Depth=1 +; CHECK-NEXT: or %s40, 0, %s41 +; CHECK-NEXT: and %s41, %s38, %s37 +; CHECK-NEXT: brne.w %s40, %s41, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: .LBB{{[0-9]+}}_3: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmps.w.sx %s34, %s38, %s39 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s34 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i8* @c, i8 %0, i8 %1 seq_cst seq_cst + %3 = extractvalue { i8, i1 } %2, 1 + %frombool = zext i1 %3 to i8 + ret i8 %frombool +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_compare_exchange_2(i16, i16) { +; CHECK-LABEL: test_atomic_compare_exchange_2: +; CHECK: .LBB{{[0-9]+}}_5: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: or %s35, 2, %s34 +; FIXME: following ld2b.zx should be ldl.sx... +; CHECK-NEXT: ld2b.zx %s37, (,%s35) +; CHECK-NEXT: and %s35, %s1, (48)0 +; CHECK-NEXT: and %s36, %s0, (48)0 +; CHECK-NEXT: sla.w.sx %s41, %s37, 16 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: lea %s39, -65536 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, %s41, %s35 +; CHECK-NEXT: or %s38, %s41, %s36 +; CHECK-NEXT: cas.w %s37, (%s34), %s38 +; CHECK-NEXT: breq.w %s37, %s38, .LBB26_3 +; CHECK-NEXT: # %partword.cmpxchg.failure +; CHECK-NEXT: # in Loop: Header=BB{{[0-9]+}}_1 Depth=1 +; CHECK-NEXT: or %s40, 0, %s41 +; CHECK-NEXT: and %s41, %s37, %s39 +; CHECK-NEXT: brne.w %s40, %s41, .LBB26_1 +; CHECK-NEXT: .LBB{{[0-9]+}}_3: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmps.w.sx %s34, %s37, %s38 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s34 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i16* @s, i16 %0, i16 %1 seq_cst seq_cst + %3 = extractvalue { i16, i1 } %2, 1 + %conv = zext i1 %3 to i16 + ret i16 %conv +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_compare_exchange_4(i32, i32) { +; CHECK-LABEL: test_atomic_compare_exchange_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: cas.w %s1, (%s34), %s0 +; CHECK-NEXT: cmps.w.sx %s34, %s1, %s0 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s34 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i32* @i, i32 %0, i32 %1 seq_cst seq_cst + %3 = extractvalue { i32, i1 } %2, 1 + %conv = zext i1 %3 to i32 + ret i32 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: %2 = cmpxchg i64* @l, i64 %0, i64 %1 seq_cst seq_cst + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_compare_exchange_16(i128, i128) { +; CHECK-LABEL: test_atomic_compare_exchange_16: +; CHECK: 
.LBB{{[0-9]+}}_2: +; CHECK-NEXT: st %s1, -8(,%s9) +; CHECK-NEXT: st %s0, -16(,%s9) +; CHECK-NEXT: lea %s34, __atomic_compare_exchange_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_compare_exchange_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: lea %s1,-16(,%s9) +; CHECK-NEXT: or %s4, 5, (0)1 +; CHECK-NEXT: or %s5, 0, %s4 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 +; CHECK-NEXT: or %s1, 0, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i128* @it, i128 %0, i128 %1 seq_cst seq_cst + %3 = extractvalue { i128, i1 } %2, 1 + %conv = zext i1 %3 to i128 + ret i128 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_relaxed(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_relaxed: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 monotonic monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_consume(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_consume: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_acquire(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_acquire: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_release(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_release: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 release monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 
@test_atomic_compare_exchange_8_acq_rel(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_acq_rel: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 acq_rel acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 seq_cst seq_cst + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_relaxed(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_relaxed: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 monotonic monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_consume(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_consume: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_acquire(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_acquire: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 
@test_atomic_compare_exchange_8_weak_release(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_release: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 release monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_acq_rel(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_acq_rel: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 acq_rel acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind readnone +define void @test_atomic_fence_relaxed() { +; CHECK-LABEL: test_atomic_fence_relaxed: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: or %s11, 0, %s9 +entry: + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_consume() { +; CHECK-LABEL: test_atomic_fence_consume: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence acquire + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_acquire() { +; CHECK-LABEL: test_atomic_fence_acquire: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence acquire + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_release() { +; CHECK-LABEL: test_atomic_fence_release: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence release + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_acq_rel() { +; CHECK-LABEL: test_atomic_fence_acq_rel: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence acq_rel + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_seq_cst() { +; CHECK-LABEL: test_atomic_fence_seq_cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence seq_cst + ret void +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_add_1() { +; CHECK-LABEL: test_atomic_fetch_add_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s36, (,%s34) +; CHECK-NEXT: lea %s35, -256 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s36 +; CHECK-NEXT: lea %s36, 1(%s36) +; CHECK-NEXT: and %s36, %s36, (56)0 +; CHECK-NEXT: and %s38, %s37, %s35 +; CHECK-NEXT: or %s36, %s38, %s36 +; CHECK-NEXT: cas.w %s36, (%s34), %s37 +; CHECK-NEXT: brne.w %s36, %s37, 
.LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s36, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_add_2() { +; CHECK-LABEL: test_atomic_fetch_add_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s36, (,%s34) +; CHECK-NEXT: lea %s35, -65536 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s36 +; CHECK-NEXT: lea %s36, 1(%s36) +; CHECK-NEXT: and %s36, %s36, (48)0 +; CHECK-NEXT: and %s38, %s37, %s35 +; CHECK-NEXT: or %s36, %s38, %s36 +; CHECK-NEXT: cas.w %s36, (%s34), %s37 +; CHECK-NEXT: brne.w %s36, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s36, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_add_4() { +; CHECK-LABEL: test_atomic_fetch_add_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea %s0, 1(%s0) +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_add_8() { +; CHECK-LABEL: test_atomic_fetch_add_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea %s0, 1(%s0) +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_add_16() { +; CHECK-LABEL: test_atomic_fetch_add_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_add_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_add_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_sub_1() { +; CHECK-LABEL: test_atomic_fetch_sub_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; 
CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s36, (,%s34) +; CHECK-NEXT: lea %s35, -256 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s36 +; CHECK-NEXT: lea %s36, -1(%s36) +; CHECK-NEXT: and %s36, %s36, (56)0 +; CHECK-NEXT: and %s38, %s37, %s35 +; CHECK-NEXT: or %s36, %s38, %s36 +; CHECK-NEXT: cas.w %s36, (%s34), %s37 +; CHECK-NEXT: brne.w %s36, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s36, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_sub_2() { +; CHECK-LABEL: test_atomic_fetch_sub_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s36, (,%s34) +; CHECK-NEXT: lea %s35, -65536 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s36 +; CHECK-NEXT: lea %s36, -1(%s36) +; CHECK-NEXT: and %s36, %s36, (48)0 +; CHECK-NEXT: and %s38, %s37, %s35 +; CHECK-NEXT: or %s36, %s38, %s36 +; CHECK-NEXT: cas.w %s36, (%s34), %s37 +; CHECK-NEXT: brne.w %s36, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s36, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_sub_4() { +; CHECK-LABEL: test_atomic_fetch_sub_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea %s0, -1(%s0) +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_sub_8() { +; CHECK-LABEL: test_atomic_fetch_sub_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea %s0, -1(%s0) +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_sub_16() { +; CHECK-LABEL: test_atomic_fetch_sub_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_sub_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_sub_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 
+; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_and_1() { +; CHECK-LABEL: test_atomic_fetch_and_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s34, (,%s35) +; CHECK-NEXT: lea %s36, -255 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s34 +; CHECK-NEXT: and %s34, %s34, %s36 +; CHECK-NEXT: cas.w %s34, (%s35), %s37 +; CHECK-NEXT: brne.w %s34, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_and_2() { +; CHECK-LABEL: test_atomic_fetch_and_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s34, (,%s35) +; CHECK-NEXT: lea %s36, -65535 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s34 +; CHECK-NEXT: and %s34, %s34, %s36 +; CHECK-NEXT: cas.w %s34, (%s35), %s37 +; CHECK-NEXT: brne.w %s34, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_and_4() { +; CHECK-LABEL: test_atomic_fetch_and_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: and %s0, 1, %s0 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_and_8() { +; CHECK-LABEL: test_atomic_fetch_and_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: and %s0, 1, %s0 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_and_16() { +; CHECK-LABEL: test_atomic_fetch_and_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_and_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, 
__atomic_fetch_and_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_or_1() { +; CHECK-LABEL: test_atomic_fetch_or_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s34, (,%s35) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s34 +; CHECK-NEXT: or %s34, 1, %s34 +; CHECK-NEXT: cas.w %s34, (%s35), %s36 +; CHECK-NEXT: brne.w %s34, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_or_2() { +; CHECK-LABEL: test_atomic_fetch_or_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s34, (,%s35) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s34 +; CHECK-NEXT: or %s34, 1, %s34 +; CHECK-NEXT: cas.w %s34, (%s35), %s36 +; CHECK-NEXT: brne.w %s34, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_or_4() { +; CHECK-LABEL: test_atomic_fetch_or_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: or %s0, 1, %s0 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_or_8() { +; CHECK-LABEL: test_atomic_fetch_or_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: or %s0, 1, %s0 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_or_16() { +; CHECK-LABEL: test_atomic_fetch_or_16: +; CHECK: 
.LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_or_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_or_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_xor_1() { +; CHECK-LABEL: test_atomic_fetch_xor_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s34, (,%s35) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s34 +; CHECK-NEXT: xor %s34, 1, %s34 +; CHECK-NEXT: cas.w %s34, (%s35), %s36 +; CHECK-NEXT: brne.w %s34, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_xor_2() { +; CHECK-LABEL: test_atomic_fetch_xor_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s34, (,%s35) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s34 +; CHECK-NEXT: xor %s34, 1, %s34 +; CHECK-NEXT: cas.w %s34, (%s35), %s36 +; CHECK-NEXT: brne.w %s34, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_xor_4() { +; CHECK-LABEL: test_atomic_fetch_xor_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s0, 1, %s0 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_xor_8() { +; CHECK-LABEL: test_atomic_fetch_xor_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s0, 1, %s0 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i64* @l, i64 1 seq_cst + ret i64 
%0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_xor_16() { +; CHECK-LABEL: test_atomic_fetch_xor_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_xor_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_xor_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_nand_1() { +; CHECK-LABEL: test_atomic_fetch_nand_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s37, (,%s34) +; CHECK-NEXT: lea %s35, 254 +; CHECK-NEXT: lea %s36, -256 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, 0, %s37 +; CHECK-NEXT: xor %s37, -1, %s37 +; CHECK-NEXT: or %s37, %s37, %s35 +; CHECK-NEXT: and %s37, %s37, (56)0 +; CHECK-NEXT: and %s39, %s38, %s36 +; CHECK-NEXT: or %s37, %s39, %s37 +; CHECK-NEXT: cas.w %s37, (%s34), %s38 +; CHECK-NEXT: brne.w %s37, %s38, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s37, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_nand_2() { +; CHECK-LABEL: test_atomic_fetch_nand_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s34, -4, %s34 +; CHECK-NEXT: ldl.sx %s37, (,%s34) +; CHECK-NEXT: lea %s35, 65534 +; CHECK-NEXT: lea %s36, -65536 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, 0, %s37 +; CHECK-NEXT: xor %s37, -1, %s37 +; CHECK-NEXT: or %s37, %s37, %s35 +; CHECK-NEXT: and %s37, %s37, (48)0 +; CHECK-NEXT: and %s39, %s38, %s36 +; CHECK-NEXT: or %s37, %s39, %s37 +; CHECK-NEXT: cas.w %s37, (%s34), %s38 +; CHECK-NEXT: brne.w %s37, %s38, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s37, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_nand_4() { +; CHECK-LABEL: test_atomic_fetch_nand_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s36, -1, %s0 +; CHECK-NEXT: or %s0, -2, %s36 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_nand_8() { +; CHECK-LABEL: 
test_atomic_fetch_nand_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s36, -1, %s0 +; CHECK-NEXT: or %s0, -2, %s36 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_nand_16() { +; CHECK-LABEL: test_atomic_fetch_nand_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_nand_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_nand_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_max_4() { +; CHECK-LABEL: test_atomic_fetch_max_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s0 +; CHECK-NEXT: maxs.w.zx %s0, %s0, %s35 +; CHECK-NEXT: cas.w %s0, (%s34), %s36 +; CHECK-NEXT: brne.w %s0, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw max i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_min_4() { +; CHECK-LABEL: test_atomic_fetch_min_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: or %s36, 2, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s0 +; CHECK-NEXT: cmps.w.sx %s38, %s0, %s36 +; CHECK-NEXT: or %s0, 0, %s35 +; CHECK-NEXT: cmov.w.lt %s0, %s37, %s38 +; CHECK-NEXT: cas.w %s0, (%s34), %s37 +; CHECK-NEXT: brne.w %s0, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw min i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_umax_4() { +; CHECK-LABEL: test_atomic_fetch_umax_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, ui@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, ui@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s0 +; CHECK-NEXT: cmpu.w %s37, %s0, %s35 +; CHECK-NEXT: or %s0, 0, %s35 +; CHECK-NEXT: cmov.w.gt %s0, %s36, %s37 +; CHECK-NEXT: cas.w %s0, (%s34), %s36 +; 
CHECK-NEXT: brne.w %s0, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw umax i32* @ui, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_umin_4() { +; CHECK-LABEL: test_atomic_fetch_umin_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, ui@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, ui@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: or %s36, 2, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s0 +; CHECK-NEXT: cmpu.w %s38, %s0, %s36 +; CHECK-NEXT: or %s0, 0, %s35 +; CHECK-NEXT: cmov.w.lt %s0, %s37, %s38 +; CHECK-NEXT: cas.w %s0, (%s34), %s37 +; CHECK-NEXT: brne.w %s0, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw umin i32* @ui, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_1() { +; CHECK-LABEL: test_atomic_clear_1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* @c seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_2() { +; CHECK-LABEL: test_atomic_clear_2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i16* @s to i8*) seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_4() { +; CHECK-LABEL: test_atomic_clear_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i32* @i to i8*) seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_8() { +; CHECK-LABEL: test_atomic_clear_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i64* @l to i8*) seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_16() { +; CHECK-LABEL: test_atomic_clear_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, it@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i128* @it to i8*) seq_cst, 
align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8stk(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8stk: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cas.l %s1, {{[0-9]+}}(%s11), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = alloca i64, align 32 + %3 = cmpxchg i64* %2, i64 %0, i64 %1 seq_cst seq_cst + %4 = extractvalue { i64, i1 } %3, 1 + %conv = zext i1 %4 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_8stk() { +; CHECK-LABEL: test_atomic_clear_8stk: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s34, 0, (0)1 +; CHECK-NEXT: st1b %s34, {{[0-9]+}}(,%s11) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = alloca i64, align 32 + %1 = bitcast i64* %0 to i8* + store atomic i8 0, i8* %1 seq_cst, align 32 + ret void +} diff --git a/llvm/test/CodeGen/VE/atomic_unaligned.ll b/llvm/test/CodeGen/VE/atomic_unaligned.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/atomic_unaligned.ll @@ -0,0 +1,2180 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +%struct.sci = type <{ i8, i32 }> +%struct.scl = type <{ i8, i64 }> +%struct.sil = type <{ i32, i64 }> + +@c = common global i8 0, align 1 +@s = common global i16 0, align 1 +@i = common global i32 0, align 1 +@l = common global i64 0, align 1 +@it = common global i128 0, align 1 +@ui = common global i32 0, align 1 +@sci1 = common global %struct.sci <{ i8 0, i32 0 }>, align 1 +@scl1 = common global %struct.scl <{ i8 0, i64 0 }>, align 1 +@sil1 = common global %struct.sil <{ i32 0, i64 0 }>, align 1 + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_1() { +; CHECK-LABEL: test_atomic_store_1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 12, i8* @c release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_1seq() { +; CHECK-LABEL: test_atomic_store_1seq: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 12, i8* @c seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_2() { +; CHECK-LABEL: test_atomic_store_2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st2b %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i16 12, i16* @s release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_2seq() { +; CHECK-LABEL: test_atomic_store_2seq: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st2b %s35, (,%s34) +;
CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i16 12, i16* @s seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_4() { +; CHECK-LABEL: test_atomic_store_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: stl %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i32 12, i32* @i release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_4cst() { +; CHECK-LABEL: test_atomic_store_4cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: stl %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i32 12, i32* @i seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_8() { +; CHECK-LABEL: test_atomic_store_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st %s35, (,%s34) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i64 12, i64* @l release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_8cst() { +; CHECK-LABEL: test_atomic_store_8cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: or %s35, 12, (0)1 +; CHECK-NEXT: st %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i64 12, i64* @l seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_16() { +; CHECK-LABEL: test_atomic_store_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_store_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_store_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 12, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 3, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i128 12, i128* @it release, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_store_16cst() { +; CHECK-LABEL: test_atomic_store_16cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_store_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_store_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 12, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i128 12, i128* @it seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_load_1() { +; CHECK-LABEL: test_atomic_load_1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: ld1b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +;
CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i8, i8* @c acquire, align 32 + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_load_1cst() { +; CHECK-LABEL: test_atomic_load_1cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: ld1b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i8, i8* @c seq_cst, align 32 + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_load_2() { +; CHECK-LABEL: test_atomic_load_2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: ld2b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i16, i16* @s acquire, align 32 + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_load_2cst() { +; CHECK-LABEL: test_atomic_load_2cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: ld2b.zx %s34, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i16, i16* @s seq_cst, align 32 + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_load_4() { +; CHECK-LABEL: test_atomic_load_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.zx %s0, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i32, i32* @i acquire, align 32 + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_load_4cst() { +; CHECK-LABEL: test_atomic_load_4cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.zx %s0, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i32, i32* @i seq_cst, align 32 + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_load_8() { +; CHECK-LABEL: test_atomic_load_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i64, i64* @l acquire, align 32 + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_load_8cst() { +; CHECK-LABEL: test_atomic_load_8cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i64, i64* @l seq_cst, align 32 + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_load_16() { +; CHECK-LABEL: test_atomic_load_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_load_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, 
__atomic_load_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 2, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i128, i128* @it acquire, align 32 + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_load_16cst() { +; CHECK-LABEL: test_atomic_load_16cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_load_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_load_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = load atomic i128, i128* @it seq_cst, align 32 + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_exchange_1() { +; CHECK-LABEL: test_atomic_exchange_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: lea %s36, 255 +; CHECK-NEXT: or %s37, 10, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: xor %s40, -1, %s40 +; CHECK-NEXT: and %s40, %s39, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: or %s38, %s40, %s38 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i8* @c, i8 10 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_exchange_2() { +; CHECK-LABEL: test_atomic_exchange_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: lea %s36, 65535 +; CHECK-NEXT: lea %s37, 28672 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: xor %s40, -1, %s40 +; CHECK-NEXT: and %s40, %s39, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: or %s38, %s40, %s38 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + 
%0 = atomicrmw xchg i16* @s, i16 28672 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_exchange_4() { +; CHECK-LABEL: test_atomic_exchange_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: lea %s0, 1886417008 +; CHECK-NEXT: ts1am.w %s0, (%s34), 15 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i32* @i, i32 1886417008 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_exchange_4_align1() { +; CHECK-LABEL: test_atomic_exchange_4_align1 +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, sci1@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, sci1@hi(%s34) +; CHECK-NEXT: lea %s0, 1886417008 +; FIXME: Bus Error occurred due to unaligned ts1am instruction +; CHECK-NEXT: ts1am.w %s0, 1(%s34), 15 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i32* getelementptr inbounds (%struct.sci, %struct.sci* @sci1, i32 0, i32 1), i32 1886417008 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_exchange_8() { +; CHECK-LABEL: test_atomic_exchange_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: lea %s35, 1886417008 +; CHECK-NEXT: lea.sl %s0, 1886417008(%s35) +; CHECK-NEXT: ts1am.l %s0, (%s34), 127 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i64* @l, i64 8102099357864587376 acquire + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_exchange_8_align1() { +; CHECK-LABEL: test_atomic_exchange_8_align1 +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, scl1@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, scl1@hi(%s34) +; CHECK-NEXT: lea %s35, 1886417008 +; CHECK-NEXT: lea.sl %s0, 1886417008(%s35) +; FIXME: Bus Error occurred due to unaligned ts1am instruction +; CHECK-NEXT: ts1am.l %s0, 1(%s34), 127 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i64* getelementptr inbounds (%struct.scl, %struct.scl* @scl1, i32 0, i32 1), i64 8102099357864587376 acquire + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_exchange_8_align4() { +; CHECK-LABEL: test_atomic_exchange_8_align4 +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, sil1@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, sil1@hi(%s34) +; CHECK-NEXT: lea %s35, 1886417008 +; CHECK-NEXT: lea.sl %s0, 1886417008(%s35) +; FIXME: Bus Error occurred due to unaligned ts1am instruction +; CHECK-NEXT: ts1am.l %s0, 4(%s34), 127 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i64* getelementptr inbounds (%struct.sil, %struct.sil* @sil1, i32 0, i32 1), i64 8102099357864587376 acquire + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_exchange_16() { +; CHECK-LABEL: test_atomic_exchange_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_exchange_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_exchange_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: lea %s34, 1886417008 +; CHECK-NEXT: lea.sl %s1, 1886417008(%s34) +; CHECK-NEXT: or %s2, 0, (0)1 +; 
CHECK-NEXT: or %s3, 2, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xchg i128* @it, i128 8102099357864587376 acquire + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_compare_exchange_1(i8, i8) { +; CHECK-LABEL: test_atomic_compare_exchange_1: +; CHECK: .LBB{{[0-9]+}}_5: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: and %s35, %s1, (56)0 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: adds.w.sx %s36, %s34, (0)1 +; CHECK-NEXT: and %s36, 3, %s36 +; CHECK-NEXT: sla.w.sx %s39, %s36, 3 +; CHECK-NEXT: sla.w.sx %s35, %s35, %s39 +; CHECK-NEXT: and %s36, %s0, (56)0 +; CHECK-NEXT: sla.w.sx %s36, %s36, %s39 +; CHECK-NEXT: and %s37, -4, %s34 +; CHECK-NEXT: ldl.sx %s40, (,%s37) +; CHECK-NEXT: lea %s38, 255 +; CHECK-NEXT: sla.w.sx %s39, %s38, %s39 +; CHECK-NEXT: xor %s39, -1, %s39 +; CHECK-NEXT: and %s42, %s40, %s39 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %partword.cmpxchg.loop +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, %s42, %s35 +; CHECK-NEXT: or %s40, %s42, %s36 +; CHECK-NEXT: cas.w %s39, (%s37), %s40 +; CHECK-NEXT: breq.w %s39, %s40, .LBB{{[0-9]+}}_3 +; CHECK-NEXT: # %bb.2: # %partword.cmpxchg.failure +; CHECK-NEXT: # in Loop: Header=BB{{[0-9]+}}_1 Depth=1 +; CHECK-NEXT: or %s41, 0, %s42 +; CHECK-NEXT: adds.w.sx %s42, %s34, (0)1 +; CHECK-NEXT: and %s42, 3, %s42 +; CHECK-NEXT: sla.w.sx %s42, %s42, 3 +; CHECK-NEXT: sla.w.sx %s42, %s38, %s42 +; CHECK-NEXT: xor %s42, -1, %s42 +; CHECK-NEXT: and %s42, %s39, %s42 +; CHECK-NEXT: brne.w %s41, %s42, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: .LBB{{[0-9]+}}_3: # %partword.cmpxchg.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmps.w.sx %s34, %s39, %s40 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s34 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i8* @c, i8 %0, i8 %1 seq_cst seq_cst + %3 = extractvalue { i8, i1 } %2, 1 + %frombool = zext i1 %3 to i8 + ret i8 %frombool +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_compare_exchange_2(i16, i16) { +; CHECK-LABEL: test_atomic_compare_exchange_2: +; CHECK: .LBB{{[0-9]+}}_5: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: and %s35, %s1, (48)0 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: adds.w.sx %s36, %s34, (0)1 +; CHECK-NEXT: and %s36, 3, %s36 +; CHECK-NEXT: sla.w.sx %s39, %s36, 3 +; CHECK-NEXT: sla.w.sx %s35, %s35, %s39 +; CHECK-NEXT: and %s36, %s0, (48)0 +; CHECK-NEXT: sla.w.sx %s36, %s36, %s39 +; CHECK-NEXT: and %s37, -4, %s34 +; CHECK-NEXT: ldl.sx %s40, (,%s37) +; CHECK-NEXT: lea %s38, 65535 +; CHECK-NEXT: sla.w.sx %s39, %s38, %s39 +; CHECK-NEXT: xor %s39, -1, %s39 +; CHECK-NEXT: and %s42, %s40, %s39 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %partword.cmpxchg.loop +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, %s42, %s35 +; CHECK-NEXT: or %s40, %s42, %s36 +; CHECK-NEXT: cas.w %s39, (%s37), %s40 +; CHECK-NEXT: breq.w %s39, %s40, .LBB{{[0-9]+}}_3 +; CHECK-NEXT: # %bb.2: # %partword.cmpxchg.failure +; CHECK-NEXT: # in Loop: Header=BB{{[0-9]+}}_1 Depth=1 +; CHECK-NEXT: or %s41, 0, %s42 +; CHECK-NEXT: adds.w.sx %s42, %s34, (0)1 +; CHECK-NEXT: and %s42, 3, %s42 +; CHECK-NEXT: sla.w.sx %s42, %s42, 3 +; CHECK-NEXT: sla.w.sx %s42, %s38, %s42 +; CHECK-NEXT: xor %s42, -1, %s42 +; CHECK-NEXT: and %s42, %s39, %s42 +; CHECK-NEXT: 
brne.w %s41, %s42, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: .LBB{{[0-9]+}}_3: # %partword.cmpxchg.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmps.w.sx %s34, %s39, %s40 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s34 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i16* @s, i16 %0, i16 %1 seq_cst seq_cst + %3 = extractvalue { i16, i1 } %2, 1 + %conv = zext i1 %3 to i16 + ret i16 %conv +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_compare_exchange_4(i32, i32) { +; CHECK-LABEL: test_atomic_compare_exchange_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: cas.w %s1, (%s34), %s0 +; CHECK-NEXT: cmps.w.sx %s34, %s1, %s0 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s34 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i32* @i, i32 %0, i32 %1 seq_cst seq_cst + %3 = extractvalue { i32, i1 } %2, 1 + %conv = zext i1 %3 to i32 + ret i32 %conv +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_compare_exchange_4_align1(i32, i32) { +; CHECK-LABEL: test_atomic_compare_exchange_4_align1 +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, sci1@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, sci1@hi(%s34) +; FIXME: Bus Error occurred due to unaligned cas instruction +; CHECK-NEXT: cas.w %s1, 1(%s34), %s0 +; CHECK-NEXT: cmps.w.sx %s34, %s1, %s0 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s0, 0, (0)1 +; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s34 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i32* getelementptr inbounds (%struct.sci, %struct.sci* @sci1, i32 0, i32 1), i32 %0, i32 %1 seq_cst seq_cst + %3 = extractvalue { i32, i1 } %2, 1 + %conv = zext i1 %3 to i32 + ret i32 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: %2 = cmpxchg i64* @l, i64 %0, i64 %1 seq_cst seq_cst + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_align1(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_align1 +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, scl1@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, scl1@hi(%s34) +; FIXME: Bus Error occurred due to unaligned cas instruction +; CHECK-NEXT: cas.l %s1, 1(%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* getelementptr inbounds (%struct.scl, %struct.scl* @scl1, i32 0, i32 1), i64 %0, i64 %1 seq_cst seq_cst + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; 
Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_align4(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_align4 +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, sil1@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, sil1@hi(%s34) +; FIXME: Bus Error occurred due to unaligned cas instruction +; CHECK-NEXT: cas.l %s1, 4(%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* getelementptr inbounds (%struct.sil, %struct.sil* @sil1, i32 0, i32 1), i64 %0, i64 %1 seq_cst seq_cst + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_compare_exchange_16(i128, i128) { +; CHECK-LABEL: test_atomic_compare_exchange_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: st %s1, -8(,%s9) +; CHECK-NEXT: st %s0, -16(,%s9) +; CHECK-NEXT: lea %s34, __atomic_compare_exchange_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_compare_exchange_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: lea %s1,-16(,%s9) +; CHECK-NEXT: or %s4, 5, (0)1 +; CHECK-NEXT: or %s5, 0, %s4 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 +; CHECK-NEXT: or %s1, 0, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i128* @it, i128 %0, i128 %1 seq_cst seq_cst + %3 = extractvalue { i128, i1 } %2, 1 + %conv = zext i1 %3 to i128 + ret i128 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_relaxed(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_relaxed: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 monotonic monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_consume(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_consume: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_acquire(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_acquire: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, 
%s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_release(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_release: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 release monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_acq_rel(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_acq_rel: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg i64* @l, i64 %0, i64 %1 acq_rel acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 seq_cst seq_cst + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_relaxed(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_relaxed: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 monotonic monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_consume(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_consume: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: 
or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_acquire(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_acquire: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 acquire acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_release(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_release: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 release monotonic + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8_weak_acq_rel(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8_weak_acq_rel: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: cas.l %s1, (%s34), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = cmpxchg weak i64* @l, i64 %0, i64 %1 acq_rel acquire + %3 = extractvalue { i64, i1 } %2, 1 + %conv = zext i1 %3 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind readnone +define void @test_atomic_fence_relaxed() { +; CHECK-LABEL: test_atomic_fence_relaxed: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: or %s11, 0, %s9 +entry: + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_consume() { +; CHECK-LABEL: test_atomic_fence_consume: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence acquire + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_acquire() { +; CHECK-LABEL: test_atomic_fence_acquire: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 2 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence acquire + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_release() { +; CHECK-LABEL: test_atomic_fence_release: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence release + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_fence_acq_rel() { +; CHECK-LABEL: test_atomic_fence_acq_rel: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence acq_rel + ret void +} + +; Function Attrs: norecurse nounwind 
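+; Note: as the checks above and below indicate, a seq_cst fence, like acq_rel, is
+; expected to lower to the full barrier (fencem 3), while acquire/consume use
+; fencem 2 and release uses fencem 1.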
+define void @test_atomic_fence_seq_cst() { +; CHECK-LABEL: test_atomic_fence_seq_cst: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + fence seq_cst + ret void +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_add_1() { +; CHECK-LABEL: test_atomic_fetch_add_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 255 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: adds.w.sx %s40, %s39, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: and %s40, %s40, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_add_2() { +; CHECK-LABEL: test_atomic_fetch_add_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 65535 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: adds.w.sx %s40, %s39, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: and %s40, %s40, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_add_4() { +; CHECK-LABEL: test_atomic_fetch_add_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea 
%s0, 1(%s0) +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_add_8() { +; CHECK-LABEL: test_atomic_fetch_add_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea %s0, 1(%s0) +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_add_16() { +; CHECK-LABEL: test_atomic_fetch_add_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_add_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_add_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw add i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_sub_1() { +; CHECK-LABEL: test_atomic_fetch_sub_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 255 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: subs.w.sx %s40, %s39, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: and %s40, %s40, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_sub_2() { +; CHECK-LABEL: test_atomic_fetch_sub_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 65535 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: 
Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: subs.w.sx %s40, %s39, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: and %s40, %s40, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_sub_4() { +; CHECK-LABEL: test_atomic_fetch_sub_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea %s0, -1(%s0) +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_sub_8() { +; CHECK-LABEL: test_atomic_fetch_sub_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: lea %s0, -1(%s0) +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_sub_16() { +; CHECK-LABEL: test_atomic_fetch_sub_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_sub_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_sub_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw sub i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_and_1() { +; CHECK-LABEL: test_atomic_fetch_and_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 255 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: 
adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_and_2() { +; CHECK-LABEL: test_atomic_fetch_and_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 65535 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_and_4() { +; CHECK-LABEL: test_atomic_fetch_and_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: and %s0, 1, %s0 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_and_8() { +; CHECK-LABEL: test_atomic_fetch_and_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: and %s0, 1, %s0 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i64* @l, i64 
1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_and_16() { +; CHECK-LABEL: test_atomic_fetch_and_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_and_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_and_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw and i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_or_1() { +; CHECK-LABEL: test_atomic_fetch_or_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s37, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, 0, %s37 +; CHECK-NEXT: adds.w.sx %s37, %s34, (0)1 +; CHECK-NEXT: and %s37, 3, %s37 +; CHECK-NEXT: sla.w.sx %s37, %s37, 3 +; CHECK-NEXT: sla.w.sx %s37, %s36, %s37 +; CHECK-NEXT: or %s37, %s38, %s37 +; CHECK-NEXT: cas.w %s37, (%s35), %s38 +; CHECK-NEXT: brne.w %s37, %s38, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s37, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_or_2() { +; CHECK-LABEL: test_atomic_fetch_or_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s37, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, 0, %s37 +; CHECK-NEXT: adds.w.sx %s37, %s34, (0)1 +; CHECK-NEXT: and %s37, 3, %s37 +; CHECK-NEXT: sla.w.sx %s37, %s37, 3 +; CHECK-NEXT: sla.w.sx %s37, %s36, %s37 +; CHECK-NEXT: or %s37, %s38, %s37 +; CHECK-NEXT: cas.w %s37, (%s35), %s38 +; CHECK-NEXT: brne.w %s37, %s38, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s37, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_or_4() { +; CHECK-LABEL: test_atomic_fetch_or_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; 
CHECK-NEXT: or %s0, 1, %s0 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_or_8() { +; CHECK-LABEL: test_atomic_fetch_or_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: or %s0, 1, %s0 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_or_16() { +; CHECK-LABEL: test_atomic_fetch_or_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_or_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_or_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw or i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_xor_1() { +; CHECK-LABEL: test_atomic_fetch_xor_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s37, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, 0, %s37 +; CHECK-NEXT: adds.w.sx %s37, %s34, (0)1 +; CHECK-NEXT: and %s37, 3, %s37 +; CHECK-NEXT: sla.w.sx %s37, %s37, 3 +; CHECK-NEXT: sla.w.sx %s37, %s36, %s37 +; CHECK-NEXT: xor %s37, %s38, %s37 +; CHECK-NEXT: cas.w %s37, (%s35), %s38 +; CHECK-NEXT: brne.w %s37, %s38, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s37, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_xor_2() { +; CHECK-LABEL: test_atomic_fetch_xor_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s37, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s38, 0, %s37 +; CHECK-NEXT: adds.w.sx %s37, %s34, (0)1 +; CHECK-NEXT: and %s37, 3, %s37 +; CHECK-NEXT: sla.w.sx %s37, %s37, 3 +; CHECK-NEXT: sla.w.sx %s37, %s36, %s37 +; CHECK-NEXT: xor %s37, %s38, %s37 +; 
CHECK-NEXT: cas.w %s37, (%s35), %s38 +; CHECK-NEXT: brne.w %s37, %s38, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s37, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_xor_4() { +; CHECK-LABEL: test_atomic_fetch_xor_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s0, 1, %s0 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_xor_8() { +; CHECK-LABEL: test_atomic_fetch_xor_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s0, 1, %s0 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_xor_16() { +; CHECK-LABEL: test_atomic_fetch_xor_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_xor_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_xor_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw xor i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define signext i8 @test_atomic_fetch_nand_1() { +; CHECK-LABEL: test_atomic_fetch_nand_1: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 255 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: and %s40, %s39, %s40 +; CHECK-NEXT: xor %s40, -1, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: and %s40, %s40, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: cas.w %s38, (%s35), 
%s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 24 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i8* @c, i8 1 seq_cst + ret i8 %0 +} + +; Function Attrs: norecurse nounwind +define signext i16 @test_atomic_fetch_nand_2() { +; CHECK-LABEL: test_atomic_fetch_nand_2: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: and %s35, -4, %s34 +; CHECK-NEXT: ldl.sx %s38, (,%s35) +; CHECK-NEXT: or %s36, 1, (0)1 +; CHECK-NEXT: lea %s37, 65535 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: # %atomicrmw.start +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s39, 0, %s38 +; CHECK-NEXT: adds.w.sx %s38, %s34, (0)1 +; CHECK-NEXT: and %s38, 3, %s38 +; CHECK-NEXT: sla.w.sx %s38, %s38, 3 +; CHECK-NEXT: sla.w.sx %s40, %s36, %s38 +; CHECK-NEXT: and %s40, %s39, %s40 +; CHECK-NEXT: xor %s40, -1, %s40 +; CHECK-NEXT: sla.w.sx %s38, %s37, %s38 +; CHECK-NEXT: and %s40, %s40, %s38 +; CHECK-NEXT: xor %s38, -1, %s38 +; CHECK-NEXT: and %s38, %s39, %s38 +; CHECK-NEXT: or %s38, %s38, %s40 +; CHECK-NEXT: cas.w %s38, (%s35), %s39 +; CHECK-NEXT: brne.w %s38, %s39, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %bb.2: # %atomicrmw.end +; CHECK-NEXT: and %s35, %s38, (32)0 +; CHECK-NEXT: adds.w.sx %s34, %s34, (0)1 +; CHECK-NEXT: and %s34, 3, %s34 +; CHECK-NEXT: sla.w.sx %s34, %s34, 3 +; CHECK-NEXT: srl %s34, %s35, %s34 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: sla.w.sx %s34, %s34, 16 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i16* @s, i16 1 seq_cst + ret i16 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_nand_4() { +; CHECK-LABEL: test_atomic_fetch_nand_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s36, -1, %s0 +; CHECK-NEXT: or %s0, -2, %s36 +; CHECK-NEXT: cas.w %s0, (%s34), %s35 +; CHECK-NEXT: brne.w %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_fetch_nand_8() { +; CHECK-LABEL: test_atomic_fetch_nand_8: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: ld %s0, (,%s34) +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s35, 0, %s0 +; CHECK-NEXT: xor %s36, -1, %s0 +; CHECK-NEXT: or %s0, -2, %s36 +; CHECK-NEXT: cas.l %s0, (%s34), %s35 +; CHECK-NEXT: brne.l %s0, %s35, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i64* @l, i64 1 seq_cst + ret i64 %0 +} + +; Function Attrs: norecurse nounwind +define i128 @test_atomic_fetch_nand_16() { +; CHECK-LABEL: 
test_atomic_fetch_nand_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: lea %s34, __atomic_fetch_nand_16@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s12, __atomic_fetch_nand_16@hi(%s34) +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s0, it@hi(%s34) +; CHECK-NEXT: or %s1, 1, (0)1 +; CHECK-NEXT: or %s2, 0, (0)1 +; CHECK-NEXT: or %s3, 5, (0)1 +; CHECK-NEXT: bsic %lr, (,%s12) +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw nand i128* @it, i128 1 seq_cst + ret i128 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_max_4() { +; CHECK-LABEL: test_atomic_fetch_max_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s0 +; CHECK-NEXT: maxs.w.zx %s0, %s0, %s35 +; CHECK-NEXT: cas.w %s0, (%s34), %s36 +; CHECK-NEXT: brne.w %s0, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw max i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_min_4() { +; CHECK-LABEL: test_atomic_fetch_min_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: or %s36, 2, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s37, 0, %s0 +; CHECK-NEXT: cmps.w.sx %s38, %s0, %s36 +; CHECK-NEXT: or %s0, 0, %s35 +; CHECK-NEXT: cmov.w.lt %s0, %s37, %s38 +; CHECK-NEXT: cas.w %s0, (%s34), %s37 +; CHECK-NEXT: brne.w %s0, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw min i32* @i, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_umax_4() { +; CHECK-LABEL: test_atomic_fetch_umax_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, ui@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, ui@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: or %s36, 0, %s0 +; CHECK-NEXT: cmpu.w %s37, %s0, %s35 +; CHECK-NEXT: or %s0, 0, %s35 +; CHECK-NEXT: cmov.w.gt %s0, %s36, %s37 +; CHECK-NEXT: cas.w %s0, (%s34), %s36 +; CHECK-NEXT: brne.w %s0, %s36, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw umax i32* @ui, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define i32 @test_atomic_fetch_umin_4() { +; CHECK-LABEL: test_atomic_fetch_umin_4: +; CHECK: .LBB{{[0-9]+}}_4: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, ui@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, ui@hi(%s34) +; CHECK-NEXT: ldl.sx %s0, (,%s34) +; CHECK-NEXT: or %s35, 1, (0)1 +; CHECK-NEXT: or %s36, 2, (0)1 +; CHECK-NEXT: .LBB{{[0-9]+}}_1: +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: 
or %s37, 0, %s0 +; CHECK-NEXT: cmpu.w %s38, %s0, %s36 +; CHECK-NEXT: or %s0, 0, %s35 +; CHECK-NEXT: cmov.w.lt %s0, %s37, %s38 +; CHECK-NEXT: cas.w %s0, (%s34), %s37 +; CHECK-NEXT: brne.w %s0, %s37, .LBB{{[0-9]+}}_1 +; CHECK-NEXT: # %atomicrmw.end +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = atomicrmw umin i32* @ui, i32 1 seq_cst + ret i32 %0 +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_1() { +; CHECK-LABEL: test_atomic_clear_1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, c@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, c@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* @c seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_2() { +; CHECK-LABEL: test_atomic_clear_2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, s@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, s@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i16* @s to i8*) seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_4() { +; CHECK-LABEL: test_atomic_clear_4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, i@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, i@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i32* @i to i8*) seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_8() { +; CHECK-LABEL: test_atomic_clear_8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, l@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, l@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i64* @l to i8*) seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_16() { +; CHECK-LABEL: test_atomic_clear_16: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: lea %s34, it@lo +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: lea.sl %s34, it@hi(%s34) +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: st1b %s35, (,%s34) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + store atomic i8 0, i8* bitcast (i128* @it to i8*) seq_cst, align 32 + ret void +} + +; Function Attrs: norecurse nounwind +define i64 @test_atomic_compare_exchange_8stk(i64, i64) { +; CHECK-LABEL: test_atomic_compare_exchange_8stk: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cas.l %s1, {{[0-9]+}}(%s11), %s0 +; CHECK-NEXT: cmps.l %s34, %s1, %s0 +; CHECK-NEXT: or %s35, 0, (0)1 +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: cmov.l.eq %s35, (63)0, %s34 +; CHECK-NEXT: adds.w.zx %s0, %s35, (0)1 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %2 = alloca i64, align 32 + %3 = cmpxchg i64* %2, i64 %0, i64 %1 seq_cst seq_cst + %4 = extractvalue { i64, i1 } %3, 1 + %conv = zext i1 %4 to i64 + ret i64 %conv +} + +; Function Attrs: norecurse nounwind +define void @test_atomic_clear_8stk() { +; 
CHECK-LABEL: test_atomic_clear_8stk: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s34, 0, (0)1 +; CHECK-NEXT: st1b %s34, {{[0-9]+}}(,%s11) +; CHECK-NEXT: fencem 3 +; CHECK-NEXT: or %s11, 0, %s9 +entry: + %0 = alloca i64, align 32 + %1 = bitcast i64* %0 to i8* + store atomic i8 0, i8* %1 seq_cst, align 32 + ret void +} diff --git a/llvm/test/CodeGen/VE/bitcast.ll b/llvm/test/CodeGen/VE/bitcast.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/bitcast.ll @@ -0,0 +1,29 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +; Function Attrs: noinline nounwind optnone +define dso_local i64 @bitcastd2l(double) { +; CHECK-LABEL: bitcastd2l: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s16, 32(,%s11) +; CHECK-NEXT: ld %s15, 24(,%s11) +; CHECK-NEXT: ld %s10, 8(,%s11) +; CHECK-NEXT: ld %s9, (,%s11) +; CHECK-NEXT: b.l (,%lr) + %2 = bitcast double %0 to i64 + ret i64 %2 +} + +; Function Attrs: noinline nounwind optnone +define dso_local double @bitcastl2d(i64) { +; CHECK-LABEL: bitcastl2d: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: or %s11, 0, %s9 +; CHECK-NEXT: ld %s16, 32(,%s11) +; CHECK-NEXT: ld %s15, 24(,%s11) +; CHECK-NEXT: ld %s10, 8(,%s11) +; CHECK-NEXT: ld %s9, (,%s11) +; CHECK-NEXT: b.l (,%lr) + %2 = bitcast i64 %0 to double + ret double %2 +} diff --git a/llvm/test/CodeGen/VE/bitreverse.ll b/llvm/test/CodeGen/VE/bitreverse.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/bitreverse.ll @@ -0,0 +1,103 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +define i64 @func1(i64) { +; CHECK-LABEL: func1: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: brv %s0, %s0 +; CHECK-NEXT: or %s11, 0, %s9 + %2 = tail call i64 @llvm.bitreverse.i64(i64 %0) + ret i64 %2 +} + +declare i64 @llvm.bitreverse.i64(i64) + +define i32 @func2(i32) { +; CHECK-LABEL: func2: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: # kill: def $sw0 killed $sw0 def $sx0 +; CHECK-NEXT: brv %s34, %s0 +; CHECK-NEXT: srl %s0, %s34, 32 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 + %2 = tail call i32 @llvm.bitreverse.i32(i32 %0) + ret i32 %2 +} + +declare i32 @llvm.bitreverse.i32(i32) + +define signext i16 @func3(i16 signext) { +; CHECK-LABEL: func3: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: # kill: def $sw0 killed $sw0 def $sx0 +; CHECK-NEXT: brv %s34, %s0 +; CHECK-NEXT: srl %s34, %s34, 32 +; CHECK-NEXT: sra.w.sx %s0, %s34, 16 +; CHECK-NEXT: or %s11, 0, %s9 + %2 = tail call i16 @llvm.bitreverse.i16(i16 %0) + ret i16 %2 +} + +declare i16 @llvm.bitreverse.i16(i16) + +define signext i8 @func4(i8 signext) { +; CHECK-LABEL: func4: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: # kill: def $sw0 killed $sw0 def $sx0 +; CHECK-NEXT: brv %s34, %s0 +; CHECK-NEXT: srl %s34, %s34, 32 +; CHECK-NEXT: sra.w.sx %s0, %s34, 24 +; CHECK-NEXT: or %s11, 0, %s9 + %2 = tail call i8 @llvm.bitreverse.i8(i8 %0) + ret i8 %2 +} + +declare i8 @llvm.bitreverse.i8(i8) + +define i64 @func5(i64) { +; CHECK-LABEL: func5: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: brv %s0, %s0 +; CHECK-NEXT: or %s11, 0, %s9 + %2 = tail call i64 @llvm.bitreverse.i64(i64 %0) + ret i64 %2 +} + +define i32 @func6(i32) { +; CHECK-LABEL: func6: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: # kill: def $sw0 killed $sw0 def $sx0 +; CHECK-NEXT: brv %s34, %s0 +; CHECK-NEXT: srl %s0, %s34, 32 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 + %2 = tail call i32 @llvm.bitreverse.i32(i32 %0) + ret i32 %2 +} + +define zeroext i16 @func7(i16 zeroext) { +; 
CHECK-LABEL: func7: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: # kill: def $sw0 killed $sw0 def $sx0 +; CHECK-NEXT: brv %s34, %s0 +; CHECK-NEXT: srl %s34, %s34, 32 +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: srl %s0, %s34, 16 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 + %2 = tail call i16 @llvm.bitreverse.i16(i16 %0) + ret i16 %2 +} + +define zeroext i8 @func8(i8 zeroext) { +; CHECK-LABEL: func8: +; CHECK: .LBB{{[0-9]+}}_2: +; CHECK-NEXT: # kill: def $sw0 killed $sw0 def $sx0 +; CHECK-NEXT: brv %s34, %s0 +; CHECK-NEXT: srl %s34, %s34, 32 +; CHECK-NEXT: and %s34, %s34, (32)0 +; CHECK-NEXT: srl %s0, %s34, 24 +; CHECK-NEXT: # kill: def $sw0 killed $sw0 killed $sx0 +; CHECK-NEXT: or %s11, 0, %s9 + %2 = tail call i8 @llvm.bitreverse.i8(i8 %0) + ret i8 %2 +} + diff --git a/llvm/test/CodeGen/VE/branch1.ll b/llvm/test/CodeGen/VE/branch1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/VE/branch1.ll @@ -0,0 +1,237 @@ +; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s + +define signext i8 @func1(i8 signext, i8 signext) { +; CHECK-LABEL: func1: +; CHECK: .LBB{{[0-9]+}}_5: +; CHECK-NEXT: brle.w %s0, %s1, .LBB{{[0-9]+}}_1 + %3 = icmp sgt i8 %0, %1 + br i1 %3, label %4, label %7 + +;