Index: include/clang/Basic/BuiltinsX86.def
===================================================================
--- include/clang/Basic/BuiltinsX86.def
+++ include/clang/Basic/BuiltinsX86.def
@@ -450,9 +450,6 @@
 BUILTIN(__builtin_ia32_vperm2f128_pd256, "V4dV4dV4dIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_ps256, "V8fV8fV8fIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_si256, "V8iV8iV8iIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4iIc", "")
 BUILTIN(__builtin_ia32_sqrtpd256, "V4dV4d", "")
 BUILTIN(__builtin_ia32_sqrtps256, "V8fV8f", "")
 BUILTIN(__builtin_ia32_rsqrtps256, "V8fV8f", "")
Index: lib/Headers/avxintrin.h
===================================================================
--- lib/Headers/avxintrin.h
+++ lib/Headers/avxintrin.h
@@ -472,22 +472,6 @@
 }
 #endif
 
-/* Vector insert */
-#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
-  __m256d __V1 = (V1); \
-  __m128d __V2 = (V2); \
-  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
-
-#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
-  __m256 __V1 = (V1); \
-  __m128 __V2 = (V2); \
-  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
-
-#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
-  __m256i __V1 = (V1); \
-  __m128i __V2 = (V2); \
-  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
-
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
 _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
 {
@@ -1166,6 +1150,34 @@
   return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
 }
 
+/* Vector insert: only bit 0 of __imm is used (VINSERTF128 imm8[0]). */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_insertf128_ps(__m256 __a, __m128 __b, const int __imm)
+{
+  __m256 __c = _mm256_castps128_ps256(__b);
+  return (__imm & 1) ?
+    __builtin_shufflevector(__a, __c, 0, 1, 2, 3, 8, 9, 10, 11) :
+    __builtin_shufflevector(__a, __c, 8, 9, 10, 11, 4, 5, 6, 7);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_insertf128_pd(__m256d __a, __m128d __b, const int __imm)
+{
+  __m256d __c = _mm256_castpd128_pd256(__b);
+  return (__imm & 1) ?
+    __builtin_shufflevector(__a, __c, 0, 1, 4, 5) :
+    __builtin_shufflevector(__a, __c, 4, 5, 2, 3);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insertf128_si256(__m256i __a, __m128i __b, const int __imm)
+{
+  __m256i __c = _mm256_castsi128_si256(__b);
+  return (__imm & 1) ?
+    __builtin_shufflevector(__a, __c, 0, 1, 4, 5) :
+    __builtin_shufflevector(__a, __c, 4, 5, 2, 3);
+}
+
 /* SIMD load ops (unaligned) */
 static __inline __m256 __attribute__((__always_inline__, __nodebug__))
 _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
Index: lib/Sema/SemaChecking.cpp
===================================================================
--- lib/Sema/SemaChecking.cpp
+++ lib/Sema/SemaChecking.cpp
@@ -882,9 +882,6 @@
   case X86::BI__builtin_ia32_vextractf128_ps256:
   case X86::BI__builtin_ia32_vextractf128_si256:
   case X86::BI__builtin_ia32_extract128i256: i = 1, l = 0, u = 1; break;
-  case X86::BI__builtin_ia32_vinsertf128_pd256:
-  case X86::BI__builtin_ia32_vinsertf128_ps256:
-  case X86::BI__builtin_ia32_vinsertf128_si256:
   case X86::BI__builtin_ia32_insert128i256: i = 2, l = 0; u = 1; break;
   case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break;
   case X86::BI__builtin_ia32_vpermil2pd:
Index: test/CodeGen/avx-shuffle-builtins.c
===================================================================
--- test/CodeGen/avx-shuffle-builtins.c
+++ test/CodeGen/avx-shuffle-builtins.c
@@ -97,3 +97,22 @@
   // CHECK: insertelement <8 x float> {{.*}}, i32 7
   return _mm256_broadcast_ss(__a);
 }
+
+__m256 test_mm256_insertf128_ps(__m256 a, __m128 b) {
+  // Check that the lane-1 insert emits the correct shuffle mask.
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  return _mm256_insertf128_ps(a, b, 1);
+}
+
+__m256d test_mm256_insertf128_pd(__m256d a, __m128d b) {
+  // Check that the lane-1 insert emits the correct shuffle mask.
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_pd(a, b, 1);
+}
+
+__m256i test_mm256_insertf128_si256(__m256i a, __m128i b) {
+  // Check that the lane-1 insert emits the correct shuffle mask.
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_si256(a, b, 1);
+}
+
Index: test/CodeGen/builtins-x86.c
===================================================================
--- test/CodeGen/builtins-x86.c
+++ test/CodeGen/builtins-x86.c
@@ -419,9 +419,6 @@
   tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7);
   tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7);
   tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7);
-  tmp_V4d = __builtin_ia32_vinsertf128_pd256(tmp_V4d, tmp_V2d, 0x1);
-  tmp_V8f = __builtin_ia32_vinsertf128_ps256(tmp_V8f, tmp_V4f, 0x1);
-  tmp_V8i = __builtin_ia32_vinsertf128_si256(tmp_V8i, tmp_V4i, 0x1);
   tmp_V4d = __builtin_ia32_sqrtpd256(tmp_V4d);
   tmp_V8f = __builtin_ia32_sqrtps256(tmp_V8f);
   tmp_V8f = __builtin_ia32_rsqrtps256(tmp_V8f);