Index: clang/lib/Driver/CMakeLists.txt
===================================================================
--- clang/lib/Driver/CMakeLists.txt
+++ clang/lib/Driver/CMakeLists.txt
@@ -65,6 +65,7 @@
   ToolChains/TCE.cpp
   ToolChains/WebAssembly.cpp
   ToolChains/XCore.cpp
+  ToolChains/PPCLinux.cpp
   Types.cpp
   XRayArgs.cpp
Index: clang/lib/Driver/Driver.cpp
===================================================================
--- clang/lib/Driver/Driver.cpp
+++ clang/lib/Driver/Driver.cpp
@@ -38,6 +38,7 @@
 #include "ToolChains/NetBSD.h"
 #include "ToolChains/OpenBSD.h"
 #include "ToolChains/PS4CPU.h"
+#include "ToolChains/PPCLinux.h"
 #include "ToolChains/RISCVToolchain.h"
 #include "ToolChains/Solaris.h"
 #include "ToolChains/TCE.h"
@@ -4576,6 +4577,11 @@
                !Target.hasEnvironment())
         TC = llvm::make_unique<toolchains::MipsLLVMToolChain>(*this, Target, Args);
+      else if (Target.getArch() == llvm::Triple::ppc ||
+               Target.getArch() == llvm::Triple::ppc64 ||
+               Target.getArch() == llvm::Triple::ppc64le)
+        TC = llvm::make_unique<toolchains::PPCLinuxToolChain>(*this, Target,
+                                                              Args);
       else
         TC = llvm::make_unique<toolchains::Linux>(*this, Target, Args);
       break;
Index: clang/lib/Driver/ToolChains/PPCLinux.h
===================================================================
--- /dev/null
+++ clang/lib/Driver/ToolChains/PPCLinux.h
@@ -0,0 +1,33 @@
+//===--- PPCLinux.h - PowerPC ToolChain Implementations ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_LINUX_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_LINUX_H
+
+#include "Linux.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY PPCLinuxToolChain : public Linux {
+public:
+  PPCLinuxToolChain(const Driver &D, const llvm::Triple &Triple,
+                    const llvm::opt::ArgList &Args)
+      : Linux(D, Triple, Args) {}
+
+  void
+  AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+                            llvm::opt::ArgStringList &CC1Args) const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_LINUX_H
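With the toolchain hook declared above, the driver prepends `<resource-dir>/include/ppc_wrappers` to the header search path on PowerPC Linux, so unmodified x86 sources pick up the wrapper header. A minimal user-side sketch (file name and flags are hypothetical, modeled on the test RUN lines later in this patch):

    /* port.c -- compile with something like:
         clang -target powerpc64le-gnu-linux -mcpu=pwr8 \
               -DNO_WARN_X86_INTRINSICS -S port.c
       The include below then resolves to the ppc_wrappers copy. */
    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    __m64 add_words(__m64 a, __m64 b) {
      return _mm_add_pi16(a, b); /* element-wise add of four 16-bit lanes */
    }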
Index: clang/lib/Driver/ToolChains/PPCLinux.cpp
===================================================================
--- /dev/null
+++ clang/lib/Driver/ToolChains/PPCLinux.cpp
@@ -0,0 +1,31 @@
+//===-- PPCLinux.cpp - PowerPC ToolChain Implementations --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCLinux.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver::toolchains;
+using namespace llvm::opt;
+
+void PPCLinuxToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+                                                  ArgStringList &CC1Args) const {
+  // The PPC wrapper headers implement x86 intrinsics on PowerPC; they are
+  // not supported on the 32-bit (PPC32) platform.
+  if (getArch() != llvm::Triple::ppc &&
+      !DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) &&
+      !DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+    const Driver &D = getDriver();
+    SmallString<128> P(D.ResourceDir);
+    llvm::sys::path::append(P, "include", "ppc_wrappers");
+    addSystemInclude(DriverArgs, CC1Args, P);
+  }
+
+  Linux::AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
Index: clang/lib/Headers/CMakeLists.txt
===================================================================
--- clang/lib/Headers/CMakeLists.txt
+++ clang/lib/Headers/CMakeLists.txt
@@ -122,6 +122,10 @@
   cuda_wrappers/new
 )
 
+set(ppc_wrapper_files
+  ppc_wrappers/mmintrin.h
+)
+
 set(output_dir ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include)
 
 set(out_files)
@@ -147,7 +151,7 @@
 
 # Copy header files from the source directory to the build directory
-foreach( f ${files} ${cuda_wrapper_files} )
+foreach( f ${files} ${cuda_wrapper_files} ${ppc_wrapper_files} )
   copy_header_to_output_dir(${CMAKE_CURRENT_SOURCE_DIR} ${f})
 endforeach( f )
Index: clang/lib/Headers/ppc_wrappers/mmintrin.h
===================================================================
--- /dev/null
+++ clang/lib/Headers/ppc_wrappers/mmintrin.h
@@ -0,0 +1,1457 @@
+/*===---- mmintrin.h - Implementation of MMX intrinsics on PowerPC ---------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Implemented from the specification included in the Intel C++ Compiler
+   User Guide and Reference, version 9.0. */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header file is to help porting code using Intel intrinsics
+   explicitly from x86_64 to powerpc64/powerpc64le.
+
+   Since the PowerPC target doesn't support a native 64-bit vector type, we
+   typedef __m64 to 64-bit unsigned long long in these MMX intrinsics, which
+   works well for _si64 and some _pi32 operations.
+
+   For _pi16 and _pi8 operations, it's better to transfer __m64 into a
+   128-bit PowerPC vector first. Power8 introduced direct register move
+   instructions, which help to build more efficient implementations.
+
+   It's the user's responsibility to determine whether the results of such
+   a port are acceptable or whether further changes are needed. Please note
+   that much code using Intel intrinsics CAN BE REWRITTEN in more portable
+   and efficient standard C or GNU C extensions with 64-bit scalar
+   operations, or in 128-bit SSE/Altivec operations, which is the
+   recommended approach. */
+#error \
+    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
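In other words, inclusion of the wrapper is an explicit opt-in. A two-line sketch of what consumers see:

    /* Without acknowledgement, inclusion is a hard error: */
    /*   #include <mmintrin.h>  -> "Please read comment above. ..." */

    /* Acknowledge the porting caveats, then include: */
    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>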
+
+#ifndef _MMINTRIN_H_INCLUDED
+#define _MMINTRIN_H_INCLUDED
+
+#include <altivec.h>
+/* The Intel API is flexible enough that we must allow aliasing with other
+   vector types, and their scalar components. */
+typedef __attribute__((__aligned__(8))) unsigned long long __m64;
+
+typedef __attribute__((__aligned__(8))) union {
+  __m64 as_m64;
+  char as_char[8];
+  signed char as_signed_char[8];
+  short as_short[4];
+  int as_int[2];
+  long long as_long_long;
+  float as_float[2];
+  double as_double;
+} __m64_union;
+
+/* Empty the multimedia state. */
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_empty(void) {
+  /* nothing to do on PowerPC. */
+}
+
+extern __inline void
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_empty(void) {
+  /* nothing to do on PowerPC. */
+}
+
+/* Convert I to a __m64 object. The integer is zero-extended to 64-bits. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi32_si64(int __i) {
+  return (__m64)(unsigned int)__i;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_from_int(int __i) {
+  return _mm_cvtsi32_si64(__i);
+}
+
+/* Convert the lower 32 bits of the __m64 object into an integer. */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_si32(__m64 __i) {
+  return ((int)__i);
+}
+
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_to_int(__m64 __i) {
+  return _mm_cvtsi64_si32(__i);
+}
+
+/* Convert I to a __m64 object. */
+
+/* Intel intrinsic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_from_int64(long long __i) {
+  return (__m64)__i;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_m64(long long __i) {
+  return (__m64)__i;
+}
+
+/* Microsoft intrinsic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64x_si64(long long __i) {
+  return (__m64)__i;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pi64x(long long __i) {
+  return (__m64)__i;
+}
+
+/* Convert the __m64 object to a 64-bit integer. */
+
+/* Intel intrinsic. */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_to_int64(__m64 __i) {
+  return (long long)__i;
+}
+
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtm64_si64(__m64 __i) {
+  return (long long)__i;
+}
+
+/* Microsoft intrinsic. */
+extern __inline long long
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cvtsi64_si64x(__m64 __i) {
+  return (long long)__i;
+}
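Since `__m64` is a plain 64-bit scalar on this target, the `_si64` conversions above are bit-for-bit copies; a small round-trip sketch (values hypothetical):

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    long long roundtrip(void) {
      __m64 v = _mm_cvtsi64_m64(0x0123456789ABCDEFLL);
      return _mm_cvtm64_si64(v); /* yields 0x0123456789ABCDEF again */
    }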
+
+#ifdef _ARCH_PWR8
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+   the result, and the four 16-bit values from M2 into the upper four 8-bit
+   values of the result, all with signed saturation. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packs_pi16(__m64 __m1, __m64 __m2) {
+  __vector signed short vm1;
+  __vector signed char vresult;
+
+  vm1 = (__vector signed short)(__vector unsigned long long)
+#ifdef __LITTLE_ENDIAN__
+      {__m1, __m2};
+#else
+      {__m2, __m1};
+#endif
+  vresult = vec_packs(vm1, vm1);
+  return (__m64)((__vector long long)vresult)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_packsswb(__m64 __m1, __m64 __m2) {
+  return _mm_packs_pi16(__m1, __m2);
+}
+
+/* Pack the two 32-bit values from M1 into the lower two 16-bit values of
+   the result, and the two 32-bit values from M2 into the upper two 16-bit
+   values of the result, all with signed saturation. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packs_pi32(__m64 __m1, __m64 __m2) {
+  __vector signed int vm1;
+  __vector signed short vresult;
+
+  vm1 = (__vector signed int)(__vector unsigned long long)
+#ifdef __LITTLE_ENDIAN__
+      {__m1, __m2};
+#else
+      {__m2, __m1};
+#endif
+  vresult = vec_packs(vm1, vm1);
+  return (__m64)((__vector long long)vresult)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_packssdw(__m64 __m1, __m64 __m2) {
+  return _mm_packs_pi32(__m1, __m2);
+}
+
+/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
+   the result, and the four 16-bit values from M2 into the upper four 8-bit
+   values of the result, all with unsigned saturation. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_packs_pu16(__m64 __m1, __m64 __m2) {
+  __vector unsigned char r;
+  __vector signed short vm1 = (__vector signed short)(__vector long long)
+#ifdef __LITTLE_ENDIAN__
+      {__m1, __m2};
+#else
+      {__m2, __m1};
+#endif
+  const __vector signed short __zero = {0};
+  __vector __bool short __select = vec_cmplt(vm1, __zero);
+  r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
+  __vector __bool char packsel = vec_pack(__select, __select);
+  r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
+  return (__m64)((__vector long long)r)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_packuswb(__m64 __m1, __m64 __m2) {
+  return _mm_packs_pu16(__m1, __m2);
+}
+#endif /* end ARCH_PWR8 */
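A worked example of the signed-saturating pack: each 16-bit lane is clamped to [-128, 127] before narrowing, with M1 supplying the low result bytes and M2 the high ones (values hypothetical; needs `-mcpu=pwr8`, matching the `_ARCH_PWR8` guard above):

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    __m64 pack_demo(void) {
      __m64 a = _mm_set_pi16(1000, -1000, 50, -50);
      __m64 b = _mm_set_pi16(4, 3, 2, 1);
      /* a's lanes clamp to {-50, 50, -128, 127}; result bytes, low to high:
         {-50, 50, -128, 127, 1, 2, 3, 4} */
      return _mm_packs_pi16(a, b);
    }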
+
+/* Interleave the four 8-bit values from the high half of M1 with the four
+   8-bit values from the high half of M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector unsigned char a, b, c;
+
+  a = (__vector unsigned char)vec_splats(__m1);
+  b = (__vector unsigned char)vec_splats(__m2);
+  c = vec_mergel(a, b);
+  return (__m64)((__vector long long)c)[1];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_char[0] = m1.as_char[4];
+  res.as_char[1] = m2.as_char[4];
+  res.as_char[2] = m1.as_char[5];
+  res.as_char[3] = m2.as_char[5];
+  res.as_char[4] = m1.as_char[6];
+  res.as_char[5] = m2.as_char[6];
+  res.as_char[6] = m1.as_char[7];
+  res.as_char[7] = m2.as_char[7];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_punpckhbw(__m64 __m1, __m64 __m2) {
+  return _mm_unpackhi_pi8(__m1, __m2);
+}
+
+/* Interleave the two 16-bit values from the high half of M1 with the two
+   16-bit values from the high half of M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_short[0] = m1.as_short[2];
+  res.as_short[1] = m2.as_short[2];
+  res.as_short[2] = m1.as_short[3];
+  res.as_short[3] = m2.as_short[3];
+
+  return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_punpckhwd(__m64 __m1, __m64 __m2) {
+  return _mm_unpackhi_pi16(__m1, __m2);
+}
+/* Interleave the 32-bit value from the high half of M1 with the 32-bit
+   value from the high half of M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_int[0] = m1.as_int[1];
+  res.as_int[1] = m2.as_int[1];
+
+  return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_punpckhdq(__m64 __m1, __m64 __m2) {
+  return _mm_unpackhi_pi32(__m1, __m2);
+}
+/* Interleave the four 8-bit values from the low half of M1 with the four
+   8-bit values from the low half of M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector unsigned char a, b, c;
+
+  a = (__vector unsigned char)vec_splats(__m1);
+  b = (__vector unsigned char)vec_splats(__m2);
+  c = vec_mergel(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_char[0] = m1.as_char[0];
+  res.as_char[1] = m2.as_char[0];
+  res.as_char[2] = m1.as_char[1];
+  res.as_char[3] = m2.as_char[1];
+  res.as_char[4] = m1.as_char[2];
+  res.as_char[5] = m2.as_char[2];
+  res.as_char[6] = m1.as_char[3];
+  res.as_char[7] = m2.as_char[3];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_punpcklbw(__m64 __m1, __m64 __m2) {
+  return _mm_unpacklo_pi8(__m1, __m2);
+}
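To make the interleaving concrete, a sketch with hypothetical byte values:

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    __m64 unpack_demo(void) {
      __m64 a = _mm_set_pi8(17, 16, 15, 14, 13, 12, 11, 10);
      __m64 b = _mm_set_pi8(27, 26, 25, 24, 23, 22, 21, 20);
      /* takes the high halves {14..17} and {24..27} and interleaves them;
         result bytes, low to high: {14, 24, 15, 25, 16, 26, 17, 27} */
      return _mm_unpackhi_pi8(a, b);
    }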
/* Interleave the two 16-bit values from the low half of M1 with the two
+   16-bit values from the low half of M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_short[0] = m1.as_short[0];
+  res.as_short[1] = m2.as_short[0];
+  res.as_short[2] = m1.as_short[1];
+  res.as_short[3] = m2.as_short[1];
+
+  return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_punpcklwd(__m64 __m1, __m64 __m2) {
+  return _mm_unpacklo_pi16(__m1, __m2);
+}
+
+/* Interleave the 32-bit value from the low half of M1 with the 32-bit
+   value from the low half of M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_int[0] = m1.as_int[0];
+  res.as_int[1] = m2.as_int[0];
+
+  return (__m64)res.as_m64;
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_punpckldq(__m64 __m1, __m64 __m2) {
+  return _mm_unpacklo_pi32(__m1, __m2);
+}
+
+/* Add the 8-bit values in M1 to the 8-bit values in M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector signed char a, b, c;
+
+  a = (__vector signed char)vec_splats(__m1);
+  b = (__vector signed char)vec_splats(__m2);
+  c = vec_add(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_char[0] = m1.as_char[0] + m2.as_char[0];
+  res.as_char[1] = m1.as_char[1] + m2.as_char[1];
+  res.as_char[2] = m1.as_char[2] + m2.as_char[2];
+  res.as_char[3] = m1.as_char[3] + m2.as_char[3];
+  res.as_char[4] = m1.as_char[4] + m2.as_char[4];
+  res.as_char[5] = m1.as_char[5] + m2.as_char[5];
+  res.as_char[6] = m1.as_char[6] + m2.as_char[6];
+  res.as_char[7] = m1.as_char[7] + m2.as_char[7];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_paddb(__m64 __m1, __m64 __m2) {
+  return _mm_add_pi8(__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector signed short a, b, c;
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = vec_add(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_short[0] = m1.as_short[0] + m2.as_short[0];
+  res.as_short[1] = m1.as_short[1] + m2.as_short[1];
+  res.as_short[2] = m1.as_short[2] + m2.as_short[2];
+  res.as_short[3] = m1.as_short[3] + m2.as_short[3];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_paddw(__m64 __m1, __m64 __m2) {
+  return _mm_add_pi16(__m1, __m2);
+}
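These element-wise adds wrap around on overflow (modular arithmetic), unlike the saturating `_mm_adds_*` variants further down. A small sketch:

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    __m64 wrap_demo(void) {
      __m64 a = _mm_set1_pi8(127); /* every lane at INT8_MAX */
      __m64 b = _mm_set1_pi8(1);
      return _mm_add_pi8(a, b);    /* every lane wraps to -128 */
    }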
+
+/* Add the 32-bit values in M1 to the 32-bit values in M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+  __vector signed int a, b, c;
+
+  a = (__vector signed int)vec_splats(__m1);
+  b = (__vector signed int)vec_splats(__m2);
+  c = vec_add(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_int[0] = m1.as_int[0] + m2.as_int[0];
+  res.as_int[1] = m1.as_int[1] + m2.as_int[1];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_paddd(__m64 __m1, __m64 __m2) {
+  return _mm_add_pi32(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector signed char a, b, c;
+
+  a = (__vector signed char)vec_splats(__m1);
+  b = (__vector signed char)vec_splats(__m2);
+  c = vec_sub(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_char[0] = m1.as_char[0] - m2.as_char[0];
+  res.as_char[1] = m1.as_char[1] - m2.as_char[1];
+  res.as_char[2] = m1.as_char[2] - m2.as_char[2];
+  res.as_char[3] = m1.as_char[3] - m2.as_char[3];
+  res.as_char[4] = m1.as_char[4] - m2.as_char[4];
+  res.as_char[5] = m1.as_char[5] - m2.as_char[5];
+  res.as_char[6] = m1.as_char[6] - m2.as_char[6];
+  res.as_char[7] = m1.as_char[7] - m2.as_char[7];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubb(__m64 __m1, __m64 __m2) {
+  return _mm_sub_pi8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector signed short a, b, c;
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = vec_sub(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_short[0] = m1.as_short[0] - m2.as_short[0];
+  res.as_short[1] = m1.as_short[1] - m2.as_short[1];
+  res.as_short[2] = m1.as_short[2] - m2.as_short[2];
+  res.as_short[3] = m1.as_short[3] - m2.as_short[3];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubw(__m64 __m1, __m64 __m2) {
+  return _mm_sub_pi16(__m1, __m2);
+}
+
+/* Subtract the 32-bit values in M2 from the 32-bit values in M1. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+  __vector signed int a, b, c;
+
+  a = (__vector signed int)vec_splats(__m1);
+  b = (__vector signed int)vec_splats(__m2);
+  c = vec_sub(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_int[0] = m1.as_int[0] - m2.as_int[0];
+  res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubd(__m64 __m1, __m64 __m2) {
+  return _mm_sub_pi32(__m1, __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_add_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 + __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sub_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 - __m2);
+}
+
+/* Shift the 64-bit value in M left by COUNT. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_si64(__m64 __m, __m64 __count) {
+  return (__m << __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psllq(__m64 __m, __m64 __count) {
+  return _mm_sll_si64(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_si64(__m64 __m, const int __count) {
+  return (__m << __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psllqi(__m64 __m, const int __count) {
+  return _mm_slli_si64(__m, __count);
+}
+
+/* Shift the 64-bit value in M right by COUNT; shift in zeros. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_si64(__m64 __m, __m64 __count) {
+  return (__m >> __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlq(__m64 __m, __m64 __count) {
+  return _mm_srl_si64(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_si64(__m64 __m, const int __count) {
+  return (__m >> __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlqi(__m64 __m, const int __count) {
+  return _mm_srli_si64(__m, __count);
+}
+
+/* Bit-wise AND the 64-bit values in M1 and M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_and_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 & __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pand(__m64 __m1, __m64 __m2) {
+  return _mm_and_si64(__m1, __m2);
+}
+
+/* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
+   64-bit value in M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_andnot_si64(__m64 __m1, __m64 __m2) {
+  return (~__m1 & __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pandn(__m64 __m1, __m64 __m2) {
+  return _mm_andnot_si64(__m1, __m2);
+}
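Note that these 64-bit helpers compile to plain C shifts, so a count of 64 or more is undefined behavior here, whereas x86 hardware produces zero; that is one of the porting caveats flagged at the top of the header. For in-range counts the mapping is direct (sketch, values hypothetical):

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    __m64 shift_demo(void) {
      __m64 v = _mm_cvtsi64_m64(0x00000000000000F0LL);
      v = _mm_slli_si64(v, 8);    /* 0x000000000000F000 */
      return _mm_srli_si64(v, 4); /* 0x0000000000000F00 */
    }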
+
+/* Bit-wise inclusive OR the 64-bit values in M1 and M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_or_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 | __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_por(__m64 __m1, __m64 __m2) {
+  return _mm_or_si64(__m1, __m2);
+}
+
+/* Bit-wise exclusive OR the 64-bit values in M1 and M2. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_xor_si64(__m64 __m1, __m64 __m2) {
+  return (__m1 ^ __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pxor(__m64 __m1, __m64 __m2) {
+  return _mm_xor_si64(__m1, __m2);
+}
+
+/* Creates a 64-bit zero. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setzero_si64(void) {
+  return (__m64)0;
+}
+
+/* Compare eight 8-bit values. The result of the comparison is 0xFF if the
+   test is true and zero if false. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
+#if defined(_ARCH_PWR6) && defined(__powerpc64__)
+  __m64 res;
+  __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
+  return (res);
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
+  res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
+  res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
+  res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
+  res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
+  res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
+  res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
+  res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pcmpeqb(__m64 __m1, __m64 __m2) {
+  return _mm_cmpeq_pi8(__m1, __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector signed char a, b, c;
+
+  a = (__vector signed char)vec_splats(__m1);
+  b = (__vector signed char)vec_splats(__m2);
+  c = (__vector signed char)vec_cmpgt(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
+  res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
+  res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
+  res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
+  res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
+  res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
+  res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
+  res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pcmpgtb(__m64 __m1, __m64 __m2) {
+  return _mm_cmpgt_pi8(__m1, __m2);
+}
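The all-ones/all-zeros lane masks produced by these comparisons combine naturally with the bitwise operations above to emulate a per-byte select; a sketch:

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    /* per byte: out[i] = (a[i] == b[i]) ? x[i] : y[i] */
    __m64 select_eq(__m64 a, __m64 b, __m64 x, __m64 y) {
      __m64 mask = _mm_cmpeq_pi8(a, b);
      return _mm_or_si64(_mm_and_si64(mask, x), _mm_andnot_si64(mask, y));
    }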
+
+/* Compare four 16-bit values. The result of the comparison is 0xFFFF if
+   the test is true and zero if false. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector signed short a, b, c;
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = (__vector signed short)vec_cmpeq(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
+  res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
+  res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
+  res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pcmpeqw(__m64 __m1, __m64 __m2) {
+  return _mm_cmpeq_pi16(__m1, __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR8
+  __vector signed short a, b, c;
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = (__vector signed short)vec_cmpgt(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
+  res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
+  res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
+  res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pcmpgtw(__m64 __m1, __m64 __m2) {
+  return _mm_cmpgt_pi16(__m1, __m2);
+}
+
+/* Compare two 32-bit values. The result of the comparison is 0xFFFFFFFF if
+   the test is true and zero if false. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+  __vector signed int a, b, c;
+
+  a = (__vector signed int)vec_splats(__m1);
+  b = (__vector signed int)vec_splats(__m2);
+  c = (__vector signed int)vec_cmpeq(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
+  res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pcmpeqd(__m64 __m1, __m64 __m2) {
+  return _mm_cmpeq_pi32(__m1, __m2);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
+#if _ARCH_PWR9
+  __vector signed int a, b, c;
+
+  a = (__vector signed int)vec_splats(__m1);
+  b = (__vector signed int)vec_splats(__m2);
+  c = (__vector signed int)vec_cmpgt(a, b);
+  return (__m64)((__vector long long)c)[0];
+#else
+  __m64_union m1, m2, res;
+
+  m1.as_m64 = __m1;
+  m2.as_m64 = __m2;
+
+  res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
+  res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
+
+  return (__m64)res.as_m64;
+#endif
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pcmpgtd(__m64 __m1, __m64 __m2) {
+  return _mm_cmpgt_pi32(__m1, __m2);
+}
+
+#if _ARCH_PWR8
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
+   saturated arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_pi8(__m64 __m1, __m64 __m2) {
+  __vector signed char a, b, c;
+
+  a = (__vector signed char)vec_splats(__m1);
+  b = (__vector signed char)vec_splats(__m2);
+  c = vec_adds(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_paddsb(__m64 __m1, __m64 __m2) {
+  return _mm_adds_pi8(__m1, __m2);
+}
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
+   saturated arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_pi16(__m64 __m1, __m64 __m2) {
+  __vector signed short a, b, c;
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = vec_adds(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_paddsw(__m64 __m1, __m64 __m2) {
+  return _mm_adds_pi16(__m1, __m2);
+}
+/* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
+   saturated arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_pu8(__m64 __m1, __m64 __m2) {
+  __vector unsigned char a, b, c;
+
+  a = (__vector unsigned char)vec_splats(__m1);
+  b = (__vector unsigned char)vec_splats(__m2);
+  c = vec_adds(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_paddusb(__m64 __m1, __m64 __m2) {
+  return _mm_adds_pu8(__m1, __m2);
+}
+
+/* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
+   saturated arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_adds_pu16(__m64 __m1, __m64 __m2) {
+  __vector unsigned short a, b, c;
+
+  a = (__vector unsigned short)vec_splats(__m1);
+  b = (__vector unsigned short)vec_splats(__m2);
+  c = vec_adds(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_paddusw(__m64 __m1, __m64 __m2) {
+  return _mm_adds_pu16(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
+   saturating arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_pi8(__m64 __m1, __m64 __m2) {
+  __vector signed char a, b, c;
+
+  a = (__vector signed char)vec_splats(__m1);
+  b = (__vector signed char)vec_splats(__m2);
+  c = vec_subs(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubsb(__m64 __m1, __m64 __m2) {
+  return _mm_subs_pi8(__m1, __m2);
+}
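Saturating lanes clamp instead of wrapping; contrast this with the modular `_mm_add_pi8` earlier. A sketch with hypothetical values (needs `-mcpu=pwr8`, per the surrounding `_ARCH_PWR8` guard):

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    __m64 sat_demo(void) {
      __m64 a = _mm_set1_pi8(120);
      __m64 b = _mm_set1_pi8(20);
      /* signed saturation: 120 + 20 clamps to 127 in every lane
         (the wrapping _mm_add_pi8 would give -116) */
      return _mm_adds_pi8(a, b);
    }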
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+   signed saturating arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_pi16(__m64 __m1, __m64 __m2) {
+  __vector signed short a, b, c;
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = vec_subs(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubsw(__m64 __m1, __m64 __m2) {
+  return _mm_subs_pi16(__m1, __m2);
+}
+
+/* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
+   unsigned saturating arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_pu8(__m64 __m1, __m64 __m2) {
+  __vector unsigned char a, b, c;
+
+  a = (__vector unsigned char)vec_splats(__m1);
+  b = (__vector unsigned char)vec_splats(__m2);
+  c = vec_subs(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubusb(__m64 __m1, __m64 __m2) {
+  return _mm_subs_pu8(__m1, __m2);
+}
+
+/* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
+   unsigned saturating arithmetic. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_subs_pu16(__m64 __m1, __m64 __m2) {
+  __vector unsigned short a, b, c;
+
+  a = (__vector unsigned short)vec_splats(__m1);
+  b = (__vector unsigned short)vec_splats(__m2);
+  c = vec_subs(a, b);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psubusw(__m64 __m1, __m64 __m2) {
+  return _mm_subs_pu16(__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
+   four 32-bit intermediate results, which are then summed by pairs to
+   produce two 32-bit results. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_madd_pi16(__m64 __m1, __m64 __m2) {
+  __vector signed short a, b;
+  __vector signed int c;
+  __vector signed int zero = {0, 0, 0, 0};
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = vec_vmsumshm(a, b, zero);
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmaddwd(__m64 __m1, __m64 __m2) {
+  return _mm_madd_pi16(__m1, __m2);
+}
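`_mm_madd_pi16` computes lane products and then sums adjacent pairs; a worked example with hypothetical values (needs `-mcpu=pwr8`):

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    __m64 madd_demo(void) {
      __m64 a = _mm_setr_pi16(1, 2, 3, 4);
      __m64 b = _mm_setr_pi16(10, 20, 30, 40);
      /* products {10, 40, 90, 160}; pair sums give two 32-bit lanes:
         {10 + 40, 90 + 160} = {50, 250} */
      return _mm_madd_pi16(a, b);
    }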
/* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
+   M2 and produce the high 16 bits of the 32-bit results. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
+  __vector signed short a, b;
+  __vector signed short c;
+  __vector signed int w0, w1;
+  __vector unsigned char xform1 = {
+#ifdef __LITTLE_ENDIAN__
+      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+#else
+      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
+      0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+#endif
+  };
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+
+  w0 = vec_vmulesh(a, b);
+  w1 = vec_vmulosh(a, b);
+  c = (__vector signed short)vec_perm(w0, w1, xform1);
+
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmulhw(__m64 __m1, __m64 __m2) {
+  return _mm_mulhi_pi16(__m1, __m2);
+}
+
+/* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
+   the low 16 bits of the results. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_mullo_pi16(__m64 __m1, __m64 __m2) {
+  __vector signed short a, b, c;
+
+  a = (__vector signed short)vec_splats(__m1);
+  b = (__vector signed short)vec_splats(__m2);
+  c = a * b;
+  return (__m64)((__vector long long)c)[0];
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pmullw(__m64 __m1, __m64 __m2) {
+  return _mm_mullo_pi16(__m1, __m2);
+}
+
+/* Shift four 16-bit values in M left by COUNT. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_pi16(__m64 __m, __m64 __count) {
+  __vector signed short m, r;
+  __vector unsigned short c;
+
+  if (__count <= 15) {
+    m = (__vector signed short)vec_splats(__m);
+    c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    r = vec_sl(m, (__vector unsigned short)c);
+    return (__m64)((__vector long long)r)[0];
+  } else
+    return (0);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psllw(__m64 __m, __m64 __count) {
+  return _mm_sll_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_pi16(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_sll_pi16. */
+  return _mm_sll_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psllwi(__m64 __m, int __count) {
+  return _mm_slli_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M left by COUNT. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sll_pi32(__m64 __m, __m64 __count) {
+  __m64_union m, res;
+
+  m.as_m64 = __m;
+
+  res.as_int[0] = m.as_int[0] << __count;
+  res.as_int[1] = m.as_int[1] << __count;
+  return (res.as_m64);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pslld(__m64 __m, __m64 __count) {
+  return _mm_sll_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_slli_pi32(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_sll_pi32. */
+  return _mm_sll_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_pslldi(__m64 __m, int __count) {
+  return _mm_slli_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_pi16(__m64 __m, __m64 __count) {
+  __vector signed short m, r;
+  __vector unsigned short c;
+
+  if (__count <= 15) {
+    m = (__vector signed short)vec_splats(__m);
+    c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    r = vec_sra(m, (__vector unsigned short)c);
+    return (__m64)((__vector long long)r)[0];
+  } else
+    return (0);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psraw(__m64 __m, __m64 __count) {
+  return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_pi16(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_sra_pi16. */
+  return _mm_sra_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrawi(__m64 __m, int __count) {
+  return _mm_srai_pi16(__m, __count);
+}
+
+/* Shift two 32-bit values in M right by COUNT; shift in the sign bit. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_sra_pi32(__m64 __m, __m64 __count) {
+  __m64_union m, res;
+
+  m.as_m64 = __m;
+
+  res.as_int[0] = m.as_int[0] >> __count;
+  res.as_int[1] = m.as_int[1] >> __count;
+  return (res.as_m64);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrad(__m64 __m, __m64 __count) {
+  return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srai_pi32(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_sra_pi32. */
+  return _mm_sra_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psradi(__m64 __m, int __count) {
+  return _mm_srai_pi32(__m, __count);
+}
+
+/* Shift four 16-bit values in M right by COUNT; shift in zeros. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_pi16(__m64 __m, __m64 __count) {
+  __vector unsigned short m, r;
+  __vector unsigned short c;
+
+  if (__count <= 15) {
+    m = (__vector unsigned short)vec_splats(__m);
+    c = (__vector unsigned short)vec_splats((unsigned short)__count);
+    r = vec_sr(m, (__vector unsigned short)c);
+    return (__m64)((__vector long long)r)[0];
+  } else
+    return (0);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlw(__m64 __m, __m64 __count) {
+  return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_pi16(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_srl_pi16. */
+  return _mm_srl_pi16(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrlwi(__m64 __m, int __count) {
+  return _mm_srli_pi16(__m, __count);
+}
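The difference between the arithmetic (`sra`) and logical (`srl`) right shifts shows up on negative lanes; a sketch (needs `-mcpu=pwr8`):

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    void shift_kinds(__m64 *sra, __m64 *srl) {
      __m64 v = _mm_set1_pi16(-16); /* 0xFFF0 in every lane */
      *sra = _mm_srai_pi16(v, 2);   /* sign fills: 0xFFFC, i.e. -4 */
      *srl = _mm_srli_pi16(v, 2);   /* zero fills: 0x3FFC */
    }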
+
+/* Shift two 32-bit values in M right by COUNT; shift in zeros. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srl_pi32(__m64 __m, __m64 __count) {
+  __m64_union m, res;
+
+  m.as_m64 = __m;
+
+  res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
+  res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
+  return (res.as_m64);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrld(__m64 __m, __m64 __count) {
+  return _mm_srl_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_pi32(__m64 __m, int __count) {
+  /* Promote int to long then invoke mm_srl_pi32. */
+  return _mm_srl_pi32(__m, __count);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _m_psrldi(__m64 __m, int __count) {
+  return _mm_srli_pi32(__m, __count);
+}
+#endif /* _ARCH_PWR8 */
+
+/* Creates a vector of two 32-bit values; I0 is least significant. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pi32(int __i1, int __i0) {
+  __m64_union res;
+
+  res.as_int[0] = __i0;
+  res.as_int[1] = __i1;
+  return (res.as_m64);
+}
+
+/* Creates a vector of four 16-bit values; W0 is least significant. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
+  __m64_union res;
+
+  res.as_short[0] = __w0;
+  res.as_short[1] = __w1;
+  res.as_short[2] = __w2;
+  res.as_short[3] = __w3;
+  return (res.as_m64);
+}
+
+/* Creates a vector of eight 8-bit values; B0 is least significant. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
+                char __b2, char __b1, char __b0) {
+  __m64_union res;
+
+  res.as_char[0] = __b0;
+  res.as_char[1] = __b1;
+  res.as_char[2] = __b2;
+  res.as_char[3] = __b3;
+  res.as_char[4] = __b4;
+  res.as_char[5] = __b5;
+  res.as_char[6] = __b6;
+  res.as_char[7] = __b7;
+  return (res.as_m64);
+}
+
+/* Similar, but with the arguments in reverse order. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_pi32(int __i0, int __i1) {
+  __m64_union res;
+
+  res.as_int[0] = __i0;
+  res.as_int[1] = __i1;
+  return (res.as_m64);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) {
+  return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4,
+                 char __b5, char __b6, char __b7) {
+  return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+/* Creates a vector of two 32-bit values, both elements containing I. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_pi32(int __i) {
+  __m64_union res;
+
+  res.as_int[0] = __i;
+  res.as_int[1] = __i;
+  return (res.as_m64);
+}
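The `_mm_set_*` constructors take arguments most-significant lane first, while the `_mm_setr_*` variants take them in memory (reversed) order, so the following two expressions build the same value:

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    int same_value(void) {
      __m64 a = _mm_set_pi16(3, 2, 1, 0);  /* lane 0 holds 0 */
      __m64 b = _mm_setr_pi16(0, 1, 2, 3); /* identical layout */
      return _mm_cvtm64_si64(a) == _mm_cvtm64_si64(b); /* 1 */
    }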
+
+/* Creates a vector of four 16-bit values, all elements containing W. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_pi16(short __w) {
+#if _ARCH_PWR9
+  __vector signed short w;
+
+  w = (__vector signed short)vec_splats(__w);
+  return (__m64)((__vector long long)w)[0];
+#else
+  __m64_union res;
+
+  res.as_short[0] = __w;
+  res.as_short[1] = __w;
+  res.as_short[2] = __w;
+  res.as_short[3] = __w;
+  return (res.as_m64);
+#endif
+}
+
+/* Creates a vector of eight 8-bit values, all elements containing B. */
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_set1_pi8(signed char __b) {
+#if _ARCH_PWR8
+  __vector signed char b;
+
+  b = (__vector signed char)vec_splats(__b);
+  return (__m64)((__vector long long)b)[0];
+#else
+  __m64_union res;
+
+  res.as_char[0] = __b;
+  res.as_char[1] = __b;
+  res.as_char[2] = __b;
+  res.as_char[3] = __b;
+  res.as_char[4] = __b;
+  res.as_char[5] = __b;
+  res.as_char[6] = __b;
+  res.as_char[7] = __b;
+  return (res.as_m64);
+#endif
+}
+#endif /* _MMINTRIN_H_INCLUDED */
Index: clang/test/CodeGen/ppc-mmintrin.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/ppc-mmintrin.c
@@ -0,0 +1,60 @@
+// REQUIRES: powerpc-registered-target
+
+// RUN: %clang -S -emit-llvm -DNO_WARN_X86_INTRINSICS -mcpu=pwr8 -target powerpc64-gnu-linux %s \
+// RUN:        -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,CHECK-BE
+// RUN: %clang -S -emit-llvm -DNO_WARN_X86_INTRINSICS -mcpu=pwr8 -target powerpc64le-gnu-linux %s \
+// RUN:        -mllvm -disable-llvm-optzns -o - | llvm-cxxfilt | FileCheck %s --check-prefixes=CHECK,CHECK-LE
+
+#include <mmintrin.h>
+
+unsigned long long int ull1, ull2;
+__m64 m1, m2, res;
+
+void __attribute__((noinline))
+test_packs() {
+  res = _mm_packs_pu16((__m64)ull1, (__m64)ull2);
+  res = _mm_packs_pi16((__m64)ull1, (__m64)ull2);
+  res = _mm_packs_pi32((__m64)ull1, (__m64)ull2);
+}
+
+// CHECK-LABEL: @test_packs
+
+// CHECK: define available_externally i64 @_mm_packs_pu16(i64 [[REG1:[0-9a-zA-Z_%.]+]], i64 [[REG2:[0-9a-zA-Z_%.]+]])
+// CHECK: store i64 [[REG1]], i64* [[REG3:[0-9a-zA-Z_%.]+]], align 8
+// CHECK-NEXT: store i64 [[REG2]], i64* [[REG4:[0-9a-zA-Z_%.]+]], align 8
+// CHECK-LE: load i64, i64* [[REG3]], align 8
+// CHECK: load i64, i64* [[REG4]], align 8
+// CHECK-BE: load i64, i64* [[REG3]], align 8
+// CHECK: [[REG5:[0-9a-zA-Z_%.]+]] = call <8 x i16> @vec_cmplt
+// CHECK-NEXT: store <8 x i16> [[REG5]], <8 x i16>* [[REG6:[0-9a-zA-Z_%.]+]], align 16
+// CHECK-NEXT: [[REG7:[0-9a-zA-Z_%.]+]] = load <8 x i16>, <8 x i16>* [[REG8:[0-9a-zA-Z_%.]+]], align 16
+// CHECK-NEXT: [[REG9:[0-9a-zA-Z_%.]+]] = load <8 x i16>, <8 x i16>* [[REG8]], align 16
+// CHECK-NEXT: [[REG10:[0-9a-zA-Z_%.]+]] = call <16 x i8> @vec_packs(unsigned short vector[8], unsigned short vector[8])(<8 x i16> [[REG7]], <8 x i16> [[REG9]])
+// CHECK-NEXT: store <16 x i8> [[REG10]], <16 x i8>* [[REG11:[0-9a-zA-Z_%.]+]], align 16
+// CHECK-NEXT: [[REG12:[0-9a-zA-Z_%.]+]] = load <8 x i16>, <8 x i16>* [[REG6]], align 16
+// CHECK-NEXT: [[REG13:[0-9a-zA-Z_%.]+]] = load <8 x i16>, <8 x i16>* [[REG6]], align 16
+// CHECK-NEXT: [[REG14:[0-9a-zA-Z_%.]+]] = call <16 x i8> @vec_pack(bool vector[8], bool vector[8])(<8 x i16> [[REG12]], <8 x i16> [[REG13]])
+// CHECK-NEXT: store <16 x i8> [[REG14]], <16 x i8>* [[REG15:[0-9a-zA-Z_%.]+]], align 16
+// CHECK-NEXT: [[REG16:[0-9a-zA-Z_%.]+]] = load <16 x i8>, <16 x i8>* [[REG11]], align 16
+// CHECK-NEXT: [[REG17:[0-9a-zA-Z_%.]+]] = load <16 x i8>, <16 x i8>* [[REG15]], align 16
+// CHECK-NEXT: call <16 x i8> @vec_sel(unsigned char vector[16], unsigned char vector[16], bool vector[16])(<16 x i8> [[REG16]], <16 x i8> zeroinitializer, <16 x i8> [[REG17]])
+
+// CHECK: define available_externally i64 @_mm_packs_pi16(i64 [[REG18:[0-9a-zA-Z_%.]+]], i64 [[REG19:[0-9a-zA-Z_%.]+]])
+// CHECK: store i64 [[REG18]], i64* [[REG20:[0-9a-zA-Z_%.]+]], align 8
+// CHECK-NEXT: store i64 [[REG19]], i64* [[REG21:[0-9a-zA-Z_%.]+]], align 8
+// CHECK-LE: load i64, i64* [[REG20]], align 8
+// CHECK: load i64, i64* [[REG21]], align 8
+// CHECK-BE: load i64, i64* [[REG20]], align 8
+// CHECK: [[REG22:[0-9a-zA-Z_%.]+]] = load <8 x i16>, <8 x i16>* [[REG23:[0-9a-zA-Z_%.]+]], align 16
+// CHECK-NEXT: [[REG24:[0-9a-zA-Z_%.]+]] = load <8 x i16>, <8 x i16>* [[REG23]], align 16
+// CHECK-NEXT: call <16 x i8> @vec_packs(short vector[8], short vector[8])(<8 x i16> [[REG22]], <8 x i16> [[REG24]])
+
+// CHECK: define available_externally i64 @_mm_packs_pi32(i64 [[REG25:[0-9a-zA-Z_%.]+]], i64 [[REG26:[0-9a-zA-Z_%.]+]])
+// CHECK: store i64 [[REG25]], i64* [[REG27:[0-9a-zA-Z_%.]+]], align 8
+// CHECK-NEXT: store i64 [[REG26]], i64* [[REG28:[0-9a-zA-Z_%.]+]], align 8
+// CHECK-LE: load i64, i64* [[REG27]], align 8
+// CHECK: load i64, i64* [[REG28]], align 8
+// CHECK-BE: load i64, i64* [[REG27]], align 8
+// CHECK: [[REG29:[0-9a-zA-Z_%.]+]] = load <4 x i32>, <4 x i32>* [[REG30:[0-9a-zA-Z_%.]+]], align 16
+// CHECK-NEXT: [[REG31:[0-9a-zA-Z_%.]+]] = load <4 x i32>, <4 x i32>* [[REG30]], align 16
+// CHECK-NEXT: call <8 x i16> @vec_packs(int vector[4], int vector[4])(<4 x i32> [[REG29]], <4 x i32> [[REG31]])
Index: clang/test/Headers/ppc-intrinsics.c
===================================================================
--- /dev/null
+++ clang/test/Headers/ppc-intrinsics.c
@@ -0,0 +1,13 @@
+// REQUIRES: powerpc-registered-target
+
+// RUN: %clang -S -emit-llvm -DNO_WARN_X86_INTRINSICS -target powerpc64-gnu-linux %s -Xclang -verify -o - | FileCheck %s
+// RUN: %clang -S -emit-llvm -DNO_WARN_X86_INTRINSICS -target powerpc64-gnu-linux %s -Xclang -verify -x c++ -o - | FileCheck %s
+// expected-no-diagnostics
+
+// RUN: not %clang -S -emit-llvm -target powerpc64-gnu-linux %s -o /dev/null 2>&1 | FileCheck %s -check-prefix=CHECK-ERROR
+
+#include <mmintrin.h>
+// CHECK-ERROR: mmintrin.h:{{[0-9]+}}:{{[0-9]+}}: error: "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+
+// CHECK: target triple = "powerpc64-
+// CHECK: !llvm.module.flags =
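As an end-to-end illustration of the ported API, here is a classic MMX idiom, sign-extending the low four bytes to 16-bit lanes, written exactly as x86 code would be and compiled unmodified on PowerPC (hypothetical example; the intrinsics used here all have non-Power8 fallbacks):

    #define NO_WARN_X86_INTRINSICS 1
    #include <mmintrin.h>

    /* sign-extend the low four 8-bit lanes of v to four 16-bit lanes */
    __m64 widen_lo_i8(__m64 v) {
      __m64 sign = _mm_cmpgt_pi8(_mm_setzero_si64(), v); /* 0xFF where v < 0 */
      return _mm_unpacklo_pi8(v, sign);
    }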