Index: clang/lib/Headers/arm_acle.h
===================================================================
--- clang/lib/Headers/arm_acle.h
+++ clang/lib/Headers/arm_acle.h
@@ -247,7 +247,6 @@
 /*
  * 9.3 16-bit multiplications
  */
-#if __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
 __smulbb(int32_t __a, int32_t __b) {
   return __builtin_arm_smulbb(__a, __b);
@@ -272,7 +271,6 @@
 __smulwt(int32_t __a, int32_t __b) {
   return __builtin_arm_smulwt(__a, __b);
 }
-#endif

 /*
  * 9.4 Saturating intrinsics
@@ -281,13 +279,10 @@
  * intrinsics are implemented and the flag is enabled.
  */
 /* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
 #define __ssat(x, y) __builtin_arm_ssat(x, y)
 #define __usat(x, y) __builtin_arm_usat(x, y)
-#endif

 /* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __qadd(int32_t __t, int32_t __v) {
   return __builtin_arm_qadd(__t, __v);
@@ -302,10 +297,8 @@
 __qdbl(int32_t __t) {
   return __builtin_arm_qadd(__t, __t);
 }
-#endif

 /* 9.4.3 Accumultating multiplications */
-#if __ARM_FEATURE_DSP
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlabb(int32_t __a, int32_t __b, int32_t __c) {
   return __builtin_arm_smlabb(__a, __b, __c);
@@ -330,17 +323,13 @@
 __smlawt(int32_t __a, int32_t __b, int32_t __c) {
   return __builtin_arm_smlawt(__a, __b, __c);
 }
-#endif

 /* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
 #define __ssat16(x, y) __builtin_arm_ssat16(x, y)
 #define __usat16(x, y) __builtin_arm_usat16(x, y)
-#endif

 /* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
 typedef int32_t int8x4_t;
 typedef int32_t int16x2_t;
 typedef uint32_t uint8x4_t;
@@ -362,18 +351,14 @@
 __uxtb16(int8x4_t __a) {
   return __builtin_arm_uxtb16(__a);
 }
-#endif

 /* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
 static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
 __sel(uint8x4_t __a, uint8x4_t __b) {
   return __builtin_arm_sel(__a, __b);
 }
-#endif

 /* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
 static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
 __qadd8(int8x4_t __a, int8x4_t __b) {
   return __builtin_arm_qadd8(__a, __b);
@@ -422,10 +407,8 @@
 __usub8(uint8x4_t __a, uint8x4_t __b) {
   return __builtin_arm_usub8(__a, __b);
 }
-#endif

 /* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __usad8(uint8x4_t __a, uint8x4_t __b) {
   return __builtin_arm_usad8(__a, __b);
@@ -434,10 +417,8 @@
 __usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
   return __builtin_arm_usada8(__a, __b, __c);
 }
-#endif

 /* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
 static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
 __qadd16(int16x2_t __a, int16x2_t __b) {
   return __builtin_arm_qadd16(__a, __b);
@@ -534,10 +515,8 @@
 __usub16(uint16x2_t __a, uint16x2_t __b) {
   return __builtin_arm_usub16(__a, __b);
 }
-#endif

 /* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
   return __builtin_arm_smlad(__a, __b, __c);
@@ -586,10 +565,8 @@
 __smusdx(int16x2_t __a, int16x2_t __b) {
   return __builtin_arm_smusdx(__a, __b);
 }
-#endif

 /* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __crc32b(uint32_t __a, uint8_t __b) {
   return __builtin_arm_crc32b(__a, __b);
@@ -629,10 +606,9 @@
 __crc32cd(uint32_t __a, uint64_t __b) {
   return __builtin_arm_crc32cd(__a, __b);
 }
-#endif

 /* Armv8.3-A Javascript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
+#if __ARM_64BIT_STATE
 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
 __jcvt(double __a) {
   return __builtin_arm_jcvt(__a);
@@ -640,7 +616,7 @@
 #endif

 /* Armv8.5-A FP rounding intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
+#if __ARM_64BIT_STATE
 static __inline__ float __attribute__((__always_inline__, __nodebug__))
 __frint32zf(float __a) {
   return __builtin_arm_frint32zf(__a);
@@ -683,7 +659,7 @@
 #endif

 /* Armv8.7-A load/store 64-byte intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
+#if __ARM_64BIT_STATE
 typedef struct {
   uint64_t val[8];
 } data512_t;
@@ -721,23 +697,18 @@
 #define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))

 /* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
 #define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
 #define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
 #define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
 #define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
 #define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
 #define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
-#endif

 /* Memory Operations Intrinsics */
-#if __ARM_FEATURE_MOPS && __ARM_FEATURE_MEMORY_TAGGING
 #define __arm_mops_memset_tag(__tagged_address, __value, __size) \
   __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
-#endif

 /* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME

 #define _TMFAILURE_REASON 0x00007fffu
 #define _TMFAILURE_RTRY 0x00008000u
@@ -756,10 +727,8 @@
 #define __tcancel(__arg) __builtin_arm_tcancel(__arg)
 #define __ttest() __builtin_arm_ttest()

-#endif /* __ARM_FEATURE_TME */
-
 /* Armv8.5-A Random number generation intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
+#if __ARM_64BIT_STATE
 static __inline__ int __attribute__((__always_inline__, __nodebug__))
 __rndr(uint64_t *__p) {
   return __builtin_arm_rndr(__p);
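
For context, the hunks above only drop the ACLE feature-macro guards (and relax the 64-bit-state conditions); the intrinsic declarations themselves are unchanged, so user code keeps including <arm_acle.h> exactly as before. Below is a minimal usage sketch, not part of the patch: the file name, build flags, and the CRC-32 seed/final inversion are illustrative assumptions, and it presumes an AArch64 target with the CRC extension enabled (e.g. -march=armv8-a+crc).

/* crc32_demo.c -- illustrative only, not part of this patch.
 * Assumed build: clang --target=aarch64-unknown-linux-gnu -march=armv8-a+crc crc32_demo.c
 */
#include <arm_acle.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint8_t data[] = {'a', 'c', 'l', 'e'};
  uint32_t crc = 0xffffffffu;           /* conventional CRC-32 initial value */
  for (unsigned i = 0; i != sizeof data; ++i)
    crc = __crc32b(crc, data[i]);       /* __crc32b(uint32_t, uint8_t) from arm_acle.h */
  printf("crc32 = 0x%08x\n", ~crc);     /* conventional final inversion */
  return 0;
}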