Index: cfe/trunk/include/clang/Basic/BuiltinsX86.def
===================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsX86.def
+++ cfe/trunk/include/clang/Basic/BuiltinsX86.def
@@ -665,6 +665,20 @@
 BUILTIN(__builtin_ia32_fxsave, "vv*", "")
 BUILTIN(__builtin_ia32_fxsave64, "vv*", "")
 
+// XSAVE
+BUILTIN(__builtin_ia32_xsave, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsave64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstor, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstor64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaveopt, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaveopt64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstors, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstors64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsavec, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsavec64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaves, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaves64, "vv*ULLi", "")
+
 // ADX
 TARGET_BUILTIN(__builtin_ia32_addcarryx_u32, "UcUcUiUiUi*", "", "adx")
 TARGET_BUILTIN(__builtin_ia32_addcarryx_u64, "UcUcULLiULLiULLi*", "", "adx")
Index: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp
@@ -6083,6 +6083,46 @@
         Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
     return Builder.CreateLoad(Tmp, "stmxcsr");
   }
+  case X86::BI__builtin_ia32_xsave:
+  case X86::BI__builtin_ia32_xsave64:
+  case X86::BI__builtin_ia32_xrstor:
+  case X86::BI__builtin_ia32_xrstor64:
+  case X86::BI__builtin_ia32_xsaveopt:
+  case X86::BI__builtin_ia32_xsaveopt64:
+  case X86::BI__builtin_ia32_xrstors:
+  case X86::BI__builtin_ia32_xrstors64:
+  case X86::BI__builtin_ia32_xsavec:
+  case X86::BI__builtin_ia32_xsavec64:
+  case X86::BI__builtin_ia32_xsaves:
+  case X86::BI__builtin_ia32_xsaves64: {
+    Intrinsic::ID ID;
+#define INTRINSIC_X86_XSAVE_ID(NAME) \
+    case X86::BI__builtin_ia32_##NAME: \
+      ID = Intrinsic::x86_##NAME; \
+      break
+    switch (BuiltinID) {
+    default: llvm_unreachable("Unsupported intrinsic!");
+    INTRINSIC_X86_XSAVE_ID(xsave);
+    INTRINSIC_X86_XSAVE_ID(xsave64);
+    INTRINSIC_X86_XSAVE_ID(xrstor);
+    INTRINSIC_X86_XSAVE_ID(xrstor64);
+    INTRINSIC_X86_XSAVE_ID(xsaveopt);
+    INTRINSIC_X86_XSAVE_ID(xsaveopt64);
+    INTRINSIC_X86_XSAVE_ID(xrstors);
+    INTRINSIC_X86_XSAVE_ID(xrstors64);
+    INTRINSIC_X86_XSAVE_ID(xsavec);
+    INTRINSIC_X86_XSAVE_ID(xsavec64);
+    INTRINSIC_X86_XSAVE_ID(xsaves);
+    INTRINSIC_X86_XSAVE_ID(xsaves64);
+    }
+#undef INTRINSIC_X86_XSAVE_ID
+    Value *Mhi = Builder.CreateTrunc(
+      Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
+    Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
+    Ops[1] = Mhi;
+    Ops.push_back(Mlo);
+    return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+  }
   case X86::BI__builtin_ia32_storehps:
   case X86::BI__builtin_ia32_storelps: {
     llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
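(Not part of the patch: a minimal C sketch of the Mhi/Mlo computation in the
CGBuiltin.cpp hunk above, with an illustrative helper name. The XSAVE-family
instructions take the feature mask in EDX:EAX, so the single 64-bit builtin
argument is split into two i32 intrinsic operands before the call.)

#include <stdint.h>

/* High half feeds the EDX operand, low half feeds the EAX operand. */
static void split_xsave_mask(uint64_t mask, uint32_t *hi, uint32_t *lo) {
  *hi = (uint32_t)(mask >> 32);
  *lo = (uint32_t)mask;
}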
Index: cfe/trunk/lib/Headers/CMakeLists.txt
===================================================================
--- cfe/trunk/lib/Headers/CMakeLists.txt
+++ cfe/trunk/lib/Headers/CMakeLists.txt
@@ -66,6 +66,10 @@
   x86intrin.h
   xmmintrin.h
   xopintrin.h
+  xsaveintrin.h
+  xsaveoptintrin.h
+  xsavecintrin.h
+  xsavesintrin.h
   xtestintrin.h
   )
Index: cfe/trunk/lib/Headers/Intrin.h
===================================================================
--- cfe/trunk/lib/Headers/Intrin.h
+++ cfe/trunk/lib/Headers/Intrin.h
@@ -289,9 +289,6 @@ static __inline__
 #define _XCR_XFEATURE_ENABLED_MASK 0
 unsigned __int64 __cdecl _xgetbv(unsigned int);
-void __cdecl _xrstor(void const *, unsigned __int64);
-void __cdecl _xsave(void *, unsigned __int64);
-void __cdecl _xsaveopt(void *, unsigned __int64);
 void __cdecl _xsetbv(unsigned int, unsigned __int64);
 
 /* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
@@ -431,9 +428,6 @@
       (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
   return _FullProduct >> 64;
 }
-void __cdecl _xrstor64(void const *, unsigned __int64);
-void __cdecl _xsave64(void *, unsigned __int64);
-void __cdecl _xsaveopt64(void *, unsigned __int64);
 
 #endif /* __x86_64__ */
Index: cfe/trunk/lib/Headers/immintrin.h
===================================================================
--- cfe/trunk/lib/Headers/immintrin.h
+++ cfe/trunk/lib/Headers/immintrin.h
@@ -144,6 +144,14 @@
 #include <fxsrintrin.h>
 
+#include <xsaveintrin.h>
+
+#include <xsaveoptintrin.h>
+
+#include <xsavecintrin.h>
+
+#include <xsavesintrin.h>
+
 /* Some intrinsics inside adxintrin.h are available only on processors with ADX,
  * whereas others are also available at all times. */
 #include <adxintrin.h>
Index: cfe/trunk/lib/Headers/xsavecintrin.h
===================================================================
--- cfe/trunk/lib/Headers/xsavecintrin.h
+++ cfe/trunk/lib/Headers/xsavecintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVECINTRIN_H
+#define __XSAVECINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsavec(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsavec64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
Index: cfe/trunk/lib/Headers/xsaveintrin.h
===================================================================
--- cfe/trunk/lib/Headers/xsaveintrin.h
+++ cfe/trunk/lib/Headers/xsaveintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsaveintrin.h - XSAVE intrinsic ----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEINTRIN_H
+#define __XSAVEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsave(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstor(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsave64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstor64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
Index: cfe/trunk/lib/Headers/xsaveoptintrin.h
===================================================================
--- cfe/trunk/lib/Headers/xsaveoptintrin.h
+++ cfe/trunk/lib/Headers/xsaveoptintrin.h
@@ -0,0 +1,48 @@
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ----------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEOPTINTRIN_H
+#define __XSAVEOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaveopt(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaveopt64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
Index: cfe/trunk/lib/Headers/xsavesintrin.h
===================================================================
--- cfe/trunk/lib/Headers/xsavesintrin.h
+++ cfe/trunk/lib/Headers/xsavesintrin.h
@@ -0,0 +1,58 @@
+/*===---- xsavesintrin.h - XSAVES intrinsic --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVESINTRIN_H
+#define __XSAVESINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaves(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstors(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstors64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaves64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
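(Not part of the patch: a minimal usage sketch for the new wrappers, assuming
an OS that has enabled XSAVE via CR4.OSXSAVE. The buffer size and names are
illustrative; real code should size the save area from
CPUID.(EAX=0DH,ECX=0):EBX. Note that _xsaves/_xrstors execute the privileged
XSAVES/XRSTORS instructions and will fault in user mode.)

#include <immintrin.h>

/* The XSAVE area must be 64-byte aligned; 4096 is only an illustrative bound. */
static char xsave_buf[4096] __attribute__((aligned(64)));

void save_and_restore_legacy_state(void) {
  unsigned long long mask = 0x3; /* bit 0 = x87 state, bit 1 = SSE state */
  _xsave(xsave_buf, mask);  /* emits XSAVE with EDX:EAX = mask */
  _xrstor(xsave_buf, mask); /* emits XRSTOR */
}

Built with, for example, clang -mxsave -c example.c.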
Index: cfe/trunk/test/CodeGen/builtins-x86.c
===================================================================
--- cfe/trunk/test/CodeGen/builtins-x86.c
+++ cfe/trunk/test/CodeGen/builtins-x86.c
@@ -42,7 +42,7 @@
   signed int tmp_i;
   unsigned int tmp_Ui;
   signed long long tmp_LLi;
-//  unsigned long long tmp_ULLi;
+  unsigned long long tmp_ULLi;
   float tmp_f;
   double tmp_d;
@@ -267,6 +267,20 @@
   (void)__builtin_ia32_fxsave64(tmp_vp);
   (void)__builtin_ia32_fxrstor(tmp_vp);
   (void)__builtin_ia32_fxrstor64(tmp_vp);
+
+  (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xsave64(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xrstor64(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xsaveopt64(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xrstors64(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xsavec64(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
+  (void)__builtin_ia32_xsaves64(tmp_vp, tmp_ULLi);
+
   tmp_V4f = __builtin_ia32_cvtpi2ps(tmp_V4f, tmp_V2i);
   tmp_V2i = __builtin_ia32_cvtps2pi(tmp_V4f);
   tmp_i = __builtin_ia32_cvtss2si(tmp_V4f);
Index: cfe/trunk/test/CodeGen/x86_32-xsave.c
===================================================================
--- cfe/trunk/test/CodeGen/x86_32-xsave.c
+++ cfe/trunk/test/CodeGen/x86_32-xsave.c
@@ -0,0 +1,72 @@
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaveopt -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaveopt -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsavec -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsavec -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaves -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaves -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+
+void test() {
+  unsigned long long tmp_ULLi;
+  void*              tmp_vp;
+
+#ifdef TEST_XSAVE
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEOPT
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEC
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVES
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
+#endif
+}
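(Not part of the patch: to run the new tests locally, the usual lit invocation
from a build directory looks like the following; the tools/clang path assumes
the classic in-tree layout with clang checked out under llvm/tools.)

./bin/llvm-lit -sv tools/clang/test/CodeGen/builtins-x86.c \
               tools/clang/test/CodeGen/x86_32-xsave.c \
               tools/clang/test/CodeGen/x86_64-xsave.c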
Index: cfe/trunk/test/CodeGen/x86_64-xsave.c
===================================================================
--- cfe/trunk/test/CodeGen/x86_64-xsave.c
+++ cfe/trunk/test/CodeGen/x86_64-xsave.c
@@ -0,0 +1,120 @@
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaveopt -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaveopt -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsavec -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsavec -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaves -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaves -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+
+void test() {
+  unsigned long long tmp_ULLi;
+  void*              tmp_vp;
+
+#ifdef TEST_XSAVE
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVE: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVE: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVE: call void @llvm.x86.xsave64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsave64(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_4:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_4:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_4:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVE: [[high32_4:%[0-9a-zA-z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVE: [[low32_4:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVE: call void @llvm.x86.xrstor64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
+  (void)__builtin_ia32_xrstor64(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEOPT
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
+
+// XSAVEOPT: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEOPT: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEOPT: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEOPT: call void @llvm.x86.xsaveopt64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsaveopt64(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEC
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
+
+// XSAVEC: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEC: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEC: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEC: call void @llvm.x86.xsavec64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsavec64(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVES
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_2:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_2:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_2:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVES: [[high32_2:%[0-9a-zA-z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVES: [[low32_2:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVES: call void @llvm.x86.xsaves64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsaves64(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_4:%[0-9a-zA-z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_4:%[0-9a-zA-z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_4:%[0-9a-zA-z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVES: [[high32_4:%[0-9a-zA-z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVES: [[low32_4:%[0-9a-zA-z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVES: call void @llvm.x86.xrstors64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
+  (void)__builtin_ia32_xrstors64(tmp_vp, tmp_ULLi);
+#endif
+}
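(Not part of the patch: callers are expected to verify CPU and OS support
before using these intrinsics at runtime. A minimal detection sketch, assuming
a <cpuid.h> that provides __get_cpuid and __get_cpuid_count; the leaf and bit
assignments follow the Intel SDM, and the helper names are illustrative.)

#include <cpuid.h>

/* XSAVE needs CPUID.1:ECX bit 26 (XSAVE) and bit 27 (OSXSAVE, i.e. the OS
 * has set CR4.OSXSAVE). */
static int cpu_has_xsave(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (ecx & (1u << 26)) && (ecx & (1u << 27));
}

/* CPUID.(EAX=0DH,ECX=1):EAX reports the extensions: bit 0 = XSAVEOPT,
 * bit 1 = XSAVEC, bit 3 = XSAVES (supervisor mode only). */
static unsigned int xsave_extension_bits(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(0xD, 1, &eax, &ebx, &ecx, &edx))
    return 0;
  return eax;
}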