Index: cfe/trunk/include/clang/Basic/BuiltinsAArch64.def
===================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsAArch64.def
+++ cfe/trunk/include/clang/Basic/BuiltinsAArch64.def
@@ -53,4 +53,12 @@
 // Prefetch
 BUILTIN(__builtin_arm_prefetch, "vvC*UiUiUiUi", "nc")
 
+// System Registers
+BUILTIN(__builtin_arm_rsr, "UicC*", "nc")
+BUILTIN(__builtin_arm_rsr64, "LUicC*", "nc")
+BUILTIN(__builtin_arm_rsrp, "v*cC*", "nc")
+BUILTIN(__builtin_arm_wsr, "vcC*Ui", "nc")
+BUILTIN(__builtin_arm_wsr64, "vcC*LUi", "nc")
+BUILTIN(__builtin_arm_wsrp, "vcC*vC*", "nc")
+
 #undef BUILTIN
Index: cfe/trunk/include/clang/Basic/BuiltinsARM.def
===================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsARM.def
+++ cfe/trunk/include/clang/Basic/BuiltinsARM.def
@@ -84,6 +84,14 @@
 // Prefetch
 BUILTIN(__builtin_arm_prefetch, "vvC*UiUi", "nc")
 
+// System registers (ACLE)
+BUILTIN(__builtin_arm_rsr, "UicC*", "nc")
+BUILTIN(__builtin_arm_rsr64, "LLUicC*", "nc")
+BUILTIN(__builtin_arm_rsrp, "v*cC*", "nc")
+BUILTIN(__builtin_arm_wsr, "vcC*Ui", "nc")
+BUILTIN(__builtin_arm_wsr64, "vcC*LLUi", "nc")
+BUILTIN(__builtin_arm_wsrp, "vcC*vC*", "nc")
+
 // MSVC
 LANGBUILTIN(__emit, "vIUiC", "", ALL_MS_LANGUAGES)
Index: cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
+++ cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
@@ -108,6 +108,7 @@
 def err_ice_too_large : Error<
   "integer constant expression evaluates to value %0 that cannot be "
   "represented in a %1-bit %select{signed|unsigned}2 integer type">;
+def err_expr_not_string_literal : Error<"expression is not a string literal">;
 
 // Semantic analysis of constant literals.
 def ext_predef_outside_function : Warning<
@@ -424,6 +425,7 @@
   "incompatible redeclaration of library function %0">,
   InGroup<DiagGroup<"incompatible-library-redeclaration">>;
 def err_builtin_definition : Error<"definition of builtin function %0">;
+def err_arm_invalid_specialreg : Error<"invalid special register for builtin">;
 def warn_builtin_unknown : Warning<"use of unknown builtin %0">,
   InGroup<ImplicitFunctionDeclare>, DefaultError;
 def warn_dyn_class_memaccess : Warning<
Index: cfe/trunk/include/clang/Sema/Sema.h
===================================================================
--- cfe/trunk/include/clang/Sema/Sema.h
+++ cfe/trunk/include/clang/Sema/Sema.h
@@ -8604,7 +8604,9 @@
                               llvm::APSInt &Result);
   bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                    int Low, int High);
-
+  bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
+                                int ArgNum, unsigned ExpectedFieldNum,
+                                bool AllowName);
 public:
   enum FormatStringType {
     FST_Scanf,
Index: cfe/trunk/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- cfe/trunk/lib/CodeGen/CGBuiltin.cpp
+++ cfe/trunk/lib/CodeGen/CGBuiltin.cpp
@@ -3279,6 +3279,66 @@
   }
 }
 
+// Generates the IR for the read/write special register builtin.
+// ValueType is the type of the value that is to be written or read;
+// RegisterType is the type of the register being written to or read from.
+static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
+                                         const CallExpr *E,
+                                         llvm::Type *RegisterType,
+                                         llvm::Type *ValueType, bool IsRead) {
+  // The read and write register intrinsics only support 32 and 64 bit operations.
+  assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
+          && "Unsupported size for register.");
+
+  CodeGen::CGBuilderTy &Builder = CGF.Builder;
+  CodeGen::CodeGenModule &CGM = CGF.CGM;
+  LLVMContext &Context = CGM.getLLVMContext();
+
+  const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
+  StringRef SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
+
+  llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
+  llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+  llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+
+  llvm::Type *Types[] = { RegisterType };
+
+  bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
+  assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
+            && "Can't fit 64-bit value in 32-bit register");
+
+  if (IsRead) {
+    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+    llvm::Value *Call = Builder.CreateCall(F, Metadata);
+
+    if (MixedTypes)
+      // Read into 64 bit register and then truncate result to 32 bit.
+      return Builder.CreateTrunc(Call, ValueType);
+
+    if (ValueType->isPointerTy())
+      // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
+      return Builder.CreateIntToPtr(Call, ValueType);
+
+    return Call;
+  }
+
+  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+  llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
+  if (MixedTypes) {
+    // Extend 32 bit write value to 64 bit to pass to write.
+    ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
+    return Builder.CreateCall(F, { Metadata, ArgValue });
+  }
+
+  if (ValueType->isPointerTy()) {
+    // Have a VoidPtrTy ArgValue but need to pass an i32/i64 to the write.
+    ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
+    return Builder.CreateCall(F, { Metadata, ArgValue });
+  }
+
+  return Builder.CreateCall(F, { Metadata, ArgValue });
+}
+
 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                            const CallExpr *E) {
   if (auto Hint = GetValueForARMHint(BuiltinID))
@@ -3491,6 +3551,37 @@
     }
   }
 
+  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+      BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+      BuiltinID == ARM::BI__builtin_arm_rsrp ||
+      BuiltinID == ARM::BI__builtin_arm_wsr ||
+      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
+      BuiltinID == ARM::BI__builtin_arm_wsrp) {
+
+    bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
+                  BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+                  BuiltinID == ARM::BI__builtin_arm_rsrp;
+
+    bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
+                            BuiltinID == ARM::BI__builtin_arm_wsrp;
+
+    bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+                   BuiltinID == ARM::BI__builtin_arm_wsr64;
+
+    llvm::Type *ValueType;
+    llvm::Type *RegisterType;
+    if (IsPointerBuiltin) {
+      ValueType = VoidPtrTy;
+      RegisterType = Int32Ty;
+    } else if (Is64Bit) {
+      ValueType = RegisterType = Int64Ty;
+    } else {
+      ValueType = RegisterType = Int32Ty;
+    }
+
+    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+  }
+
   // Find out if any arguments are required to be integer constant
   // expressions.
   unsigned ICEArguments = 0;
@@ -4239,6 +4330,36 @@
     return Builder.CreateCall(F, {Arg0, Arg1});
   }
 
+  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+      BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+      BuiltinID == AArch64::BI__builtin_arm_wsrp) {
+
+    bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
+                  BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+                  BuiltinID == AArch64::BI__builtin_arm_rsrp;
+
+    bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+                            BuiltinID == AArch64::BI__builtin_arm_wsrp;
+
+    bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
+                   BuiltinID != AArch64::BI__builtin_arm_wsr;
+
+    llvm::Type *ValueType;
+    llvm::Type *RegisterType = Int64Ty;
+    if (IsPointerBuiltin) {
+      ValueType = VoidPtrTy;
+    } else if (Is64Bit) {
+      ValueType = Int64Ty;
+    } else {
+      ValueType = Int32Ty;
+    }
+
+    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
+  }
+
   // Find out if any arguments are required to be integer constant
   // expressions.
   unsigned ICEArguments = 0;
Index: cfe/trunk/lib/Headers/arm_acle.h
===================================================================
--- cfe/trunk/lib/Headers/arm_acle.h
+++ cfe/trunk/lib/Headers/arm_acle.h
@@ -289,6 +289,14 @@
 }
 #endif
 
+/* 10.1 Special register intrinsics */
+#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
+#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
+#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
+#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
+#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+
 #if defined(__cplusplus)
 }
 #endif
Index: cfe/trunk/lib/Sema/SemaChecking.cpp
===================================================================
--- cfe/trunk/lib/Sema/SemaChecking.cpp
+++ cfe/trunk/lib/Sema/SemaChecking.cpp
@@ -836,6 +836,16 @@
     SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
   }
 
+  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+      BuiltinID == ARM::BI__builtin_arm_wsr64)
+    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
+
+  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
+      BuiltinID == ARM::BI__builtin_arm_rsrp ||
+      BuiltinID == ARM::BI__builtin_arm_wsr ||
+      BuiltinID == ARM::BI__builtin_arm_wsrp)
+    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
   if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
     return true;
 
@@ -876,6 +886,16 @@
     SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
   }
 
+  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr64)
+    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, false);
+
+  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
+      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+      BuiltinID == AArch64::BI__builtin_arm_wsr ||
+      BuiltinID == AArch64::BI__builtin_arm_wsrp)
+    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+
   if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
     return true;
 
@@ -2593,6 +2613,107 @@
   return false;
 }
 
+/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
+/// TheCall is an ARM/AArch64 special register string literal.
+bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
+                                    int ArgNum, unsigned ExpectedFieldNum,
+                                    bool AllowName) {
+  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
+                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
+                      BuiltinID == ARM::BI__builtin_arm_rsr ||
+                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
+                      BuiltinID == ARM::BI__builtin_arm_wsr ||
+                      BuiltinID == ARM::BI__builtin_arm_wsrp;
+  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
+                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
+                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
+                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
+                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
+  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
+
+  // We can't check the value of a dependent argument.
+  Expr *Arg = TheCall->getArg(ArgNum);
+  if (Arg->isTypeDependent() || Arg->isValueDependent())
+    return false;
+
+  // Check if the argument is a string literal.
+  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
+    return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+           << Arg->getSourceRange();
+
+  // Check the type of special register given.
+  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
+  SmallVector<StringRef, 6> Fields;
+  Reg.split(Fields, ":");
+
+  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
+    return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
+           << Arg->getSourceRange();
+
+  // If the string is the name of a register then we cannot check that it is
+  // valid here, but if the string is of one of the forms described in ACLE
+  // then we can check that the supplied fields are integers and within the
+  // valid ranges.
+  if (Fields.size() > 1) {
+    bool FiveFields = Fields.size() == 5;
+
+    bool ValidString = true;
+    if (IsARMBuiltin) {
+      ValidString &= Fields[0].startswith_lower("cp") ||
+                     Fields[0].startswith_lower("p");
+      if (ValidString)
+        Fields[0] =
+            Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
+
+      ValidString &= Fields[2].startswith_lower("c");
+      if (ValidString)
+        Fields[2] = Fields[2].drop_front(1);
+
+      if (FiveFields) {
+        ValidString &= Fields[3].startswith_lower("c");
+        if (ValidString)
+          Fields[3] = Fields[3].drop_front(1);
+      }
+    }
+
+    SmallVector<int, 5> Ranges;
+    if (FiveFields)
+      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 7, 15, 15});
+    else
+      Ranges.append({15, 7, 15});
+
+    for (unsigned i = 0; i < Fields.size(); ++i) {
+      int IntField;
+      ValidString &= !Fields[i].getAsInteger(10, IntField);
+      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
+    }
+
+    if (!ValidString)
+      return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
+             << Arg->getSourceRange();
+
+  } else if (IsAArch64Builtin && Fields.size() == 1) {
+    // If the register name is one of those that appear in the condition below
+    // and the special register builtin being used is one of the write builtins,
+    // then we require that the argument provided for writing to the register
+    // is an integer constant expression. This is because it will be lowered to
+    // an MSR (immediate) instruction, so we need to know the immediate at
+    // compile time.
+    if (TheCall->getNumArgs() != 2)
+      return false;
+
+    std::string RegLower = Reg.lower();
+    if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
+        RegLower != "pan" && RegLower != "uao")
+      return false;
+
+    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+  }
+
+  return false;
+}
+
 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
 /// This checks that the target supports __builtin_longjmp and
 /// that val is a constant 1.
Index: cfe/trunk/test/CodeGen/arm_acle.c
===================================================================
--- cfe/trunk/test/CodeGen/arm_acle.c
+++ cfe/trunk/test/CodeGen/arm_acle.c
@@ -336,3 +336,69 @@
 uint32_t test_crc32cd(uint32_t a, uint64_t b) {
   return __crc32cd(a, b);
 }
+
+/* 10.1 Special register intrinsics */
+// ARM-LABEL: test_rsr
+// AArch64: call i64 @llvm.read_register.i64(metadata !1)
+// AArch32: call i32 @llvm.read_register.i32(metadata !3)
+uint32_t test_rsr() {
+#ifdef __ARM_32BIT_STATE
+  return __arm_rsr("cp1:2:c3:c4:5");
+#else
+  return __arm_rsr("1:2:3:4:5");
+#endif
+}
+
+// ARM-LABEL: test_rsr64
+// AArch64: call i64 @llvm.read_register.i64(metadata !1)
+// AArch32: call i64 @llvm.read_register.i64(metadata !4)
+uint64_t test_rsr64() {
+#ifdef __ARM_32BIT_STATE
+  return __arm_rsr64("cp1:2:c3");
+#else
+  return __arm_rsr64("1:2:3:4:5");
+#endif
+}
+
+// ARM-LABEL: test_rsrp
+// AArch64: call i64 @llvm.read_register.i64(metadata !2)
+// AArch32: call i32 @llvm.read_register.i32(metadata !5)
+void *test_rsrp() {
+  return __arm_rsrp("sysreg");
+}
+
+// ARM-LABEL: test_wsr
+// AArch64: call void @llvm.write_register.i64(metadata !1, i64 %1)
+// AArch32: call void @llvm.write_register.i32(metadata !3, i32 %v)
+void test_wsr(uint32_t v) {
+#ifdef __ARM_32BIT_STATE
+  __arm_wsr("cp1:2:c3:c4:5", v);
+#else
+  __arm_wsr("1:2:3:4:5", v);
+#endif
+}
+
+// ARM-LABEL: test_wsr64
+// AArch64: call void @llvm.write_register.i64(metadata !1, i64 %v)
+// AArch32: call void @llvm.write_register.i64(metadata !4, i64 %v)
+void test_wsr64(uint64_t v) {
+#ifdef __ARM_32BIT_STATE
+  __arm_wsr64("cp1:2:c3", v);
+#else
+  __arm_wsr64("1:2:3:4:5", v);
+#endif
+}
+
+// ARM-LABEL: test_wsrp
+// AArch64: call void @llvm.write_register.i64(metadata !2, i64 %1)
+// AArch32: call void @llvm.write_register.i32(metadata !5, i32 %1)
+void test_wsrp(void *v) {
+  __arm_wsrp("sysreg", v);
+}
+
+// AArch32: !3 = !{!"cp1:2:c3:c4:5"}
+// AArch32: !4 = !{!"cp1:2:c3"}
+// AArch32: !5 = !{!"sysreg"}
+
+// AArch64: !1 = !{!"1:2:3:4:5"}
+// AArch64: !2 = !{!"sysreg"}
Index: cfe/trunk/test/CodeGen/builtins-arm.c
===================================================================
--- cfe/trunk/test/CodeGen/builtins-arm.c
+++ cfe/trunk/test/CodeGen/builtins-arm.c
@@ -84,3 +84,42 @@
   __builtin_arm_prefetch(&i, 1, 0);
   // CHECK: call {{.*}} @llvm.prefetch(i8* %{{.*}}, i32 1, i32 3, i32 0)
 }
+
+unsigned rsr() {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = {{.*}} call i32 @llvm.read_register.i32(metadata !7)
+  // CHECK-NEXT: ret i32 [[V0]]
+  return __builtin_arm_rsr("cp1:2:c3:c4:5");
+}
+
+unsigned long long rsr64() {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = {{.*}} call i64 @llvm.read_register.i64(metadata !8)
+  // CHECK-NEXT: ret i64 [[V0]]
+  return __builtin_arm_rsr64("cp1:2:c3");
+}
+
+void *rsrp() {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = {{.*}} call i32 @llvm.read_register.i32(metadata !9)
+  // CHECK-NEXT: [[V1:[%A-Za-z0-9.]+]] = inttoptr i32 [[V0]] to i8*
+  // CHECK-NEXT: ret i8* [[V1]]
+  return __builtin_arm_rsrp("sysreg");
+}
+
+void wsr(unsigned v) {
+  // CHECK: call void @llvm.write_register.i32(metadata !7, i32 %v)
+  __builtin_arm_wsr("cp1:2:c3:c4:5", v);
+}
+
+void wsr64(unsigned long long v) {
+  // CHECK: call void @llvm.write_register.i64(metadata !8, i64 %v)
+  __builtin_arm_wsr64("cp1:2:c3", v);
+}
+
+void wsrp(void *v) {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = ptrtoint i8* %v to i32
+  // CHECK-NEXT: call void @llvm.write_register.i32(metadata !9, i32 [[V0]])
+  __builtin_arm_wsrp("sysreg", v);
+}
+
+// CHECK: !7 = !{!"cp1:2:c3:c4:5"}
+// CHECK: !8 = !{!"cp1:2:c3"}
+// CHECK: !9 = !{!"sysreg"}
Index: cfe/trunk/test/CodeGen/builtins-arm64.c
===================================================================
--- cfe/trunk/test/CodeGen/builtins-arm64.c
+++ cfe/trunk/test/CodeGen/builtins-arm64.c
@@ -43,3 +43,39 @@
   __builtin_arm_prefetch(0, 0, 0, 0, 0); // plil1keep
   // CHECK: call {{.*}} @llvm.prefetch(i8* null, i32 0, i32 3, i32 0)
 }
+
+unsigned rsr() {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = {{.*}} call i64 @llvm.read_register.i64(metadata !1)
+  // CHECK-NEXT: trunc i64 [[V0]] to i32
+  return __builtin_arm_rsr("1:2:3:4:5");
+}
+
+unsigned long rsr64() {
+  // CHECK: call i64 @llvm.read_register.i64(metadata !1)
+  return __builtin_arm_rsr64("1:2:3:4:5");
+}
+
+void *rsrp() {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = {{.*}} call i64 @llvm.read_register.i64(metadata !1)
+  // CHECK-NEXT: inttoptr i64 [[V0]] to i8*
+  return __builtin_arm_rsrp("1:2:3:4:5");
+}
+
+void wsr(unsigned v) {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = zext i32 %v to i64
+  // CHECK-NEXT: call void @llvm.write_register.i64(metadata !1, i64 [[V0]])
+  __builtin_arm_wsr("1:2:3:4:5", v);
+}
+
+void wsr64(unsigned long v) {
+  // CHECK: call void @llvm.write_register.i64(metadata !1, i64 %v)
+  __builtin_arm_wsr64("1:2:3:4:5", v);
+}
+
+void wsrp(void *v) {
+  // CHECK: [[V0:[%A-Za-z0-9.]+]] = ptrtoint i8* %v to i64
+  // CHECK-NEXT: call void @llvm.write_register.i64(metadata !1, i64 [[V0]])
+  __builtin_arm_wsrp("1:2:3:4:5", v);
+}
+
+// CHECK: !1 = !{!"1:2:3:4:5"}
Index: cfe/trunk/test/Sema/aarch64-special-register.c
===================================================================
--- cfe/trunk/test/Sema/aarch64-special-register.c
+++ cfe/trunk/test/Sema/aarch64-special-register.c
@@ -0,0 +1,77 @@
+// RUN: %clang_cc1 -ffreestanding -fsyntax-only -verify -triple aarch64 %s
+
+void string_literal(unsigned v) {
+  __builtin_arm_wsr(0, v); // expected-error {{expression is not a string literal}}
+}
+
+void wsr_1(unsigned v) {
+  __builtin_arm_wsr("sysreg", v);
+}
+
+void wsrp_1(void *v) {
+  __builtin_arm_wsrp("sysreg", v);
+}
+
+void wsr64_1(unsigned long v) {
+  __builtin_arm_wsr64("sysreg", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_1() {
+  return __builtin_arm_rsr("sysreg");
+}
+
+void *rsrp_1() {
+  return __builtin_arm_rsrp("sysreg");
+}
+
+unsigned long rsr64_1() {
+  return __builtin_arm_rsr64("sysreg"); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr_2(unsigned v) {
+  __builtin_arm_wsr("0:1:2:3:4", v);
+}
+
+void wsrp_2(void *v) {
+  __builtin_arm_wsrp("0:1:2:3:4", v);
+}
+
+void wsr64_2(unsigned long v) {
+  __builtin_arm_wsr64("0:1:2:3:4", v);
+}
+
+unsigned rsr_2() {
+  return __builtin_arm_rsr("0:1:2:3:4");
+}
+
+void *rsrp_2() {
+  return __builtin_arm_rsrp("0:1:2:3:4");
+}
+
+unsigned long rsr64_2() {
+  return __builtin_arm_rsr64("0:1:2:3:4");
+}
+
+void wsr_3(unsigned v) {
+  __builtin_arm_wsr("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsrp_3(void *v) {
+  __builtin_arm_wsrp("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr64_3(unsigned long v) {
+  __builtin_arm_wsr64("0:1:2", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_3() {
+  return __builtin_arm_rsr("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
+
+void *rsrp_3() {
+  return __builtin_arm_rsrp("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned long rsr64_3() {
+  return __builtin_arm_rsr64("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
Index: cfe/trunk/test/Sema/arm-special-register.c
===================================================================
--- cfe/trunk/test/Sema/arm-special-register.c
+++ cfe/trunk/test/Sema/arm-special-register.c
@@ -0,0 +1,89 @@
+// RUN: %clang_cc1 -ffreestanding -fsyntax-only -verify -triple arm %s
+
+void string_literal(unsigned v) {
+  __builtin_arm_wsr(0, v); // expected-error {{expression is not a string literal}}
+}
+
+void wsr_1(unsigned v) {
+  __builtin_arm_wsr("sysreg", v);
+}
+
+void wsrp_1(void *v) {
+  __builtin_arm_wsrp("sysreg", v);
+}
+
+void wsr64_1(unsigned long v) {
+  __builtin_arm_wsr64("sysreg", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_1() {
+  return __builtin_arm_rsr("sysreg");
+}
+
+void *rsrp_1() {
+  return __builtin_arm_rsrp("sysreg");
+}
+
+unsigned long rsr64_1() {
+  return __builtin_arm_rsr64("sysreg"); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr_2(unsigned v) {
+  __builtin_arm_wsr("cp0:1:c2:c3:4", v);
+}
+
+void wsrp_2(void *v) {
+  __builtin_arm_wsrp("cp0:1:c2:c3:4", v);
+}
+
+void wsr64_2(unsigned long v) {
+  __builtin_arm_wsr64("cp0:1:c2:c3:4", v); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned rsr_2() {
+  return __builtin_arm_rsr("cp0:1:c2:c3:4");
+}
+
+void *rsrp_2() {
+  return __builtin_arm_rsrp("cp0:1:c2:c3:4");
+}
+
+unsigned long rsr64_2() {
+  return __builtin_arm_rsr64("cp0:1:c2:c3:4"); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr_3(unsigned v) {
+  __builtin_arm_wsr("cp0:1:c2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsrp_3(void *v) {
+  __builtin_arm_wsrp("cp0:1:c2", v); //expected-error {{invalid special register for builtin}}
+}
+
+void wsr64_3(unsigned long v) {
+  __builtin_arm_wsr64("cp0:1:c2", v);
+}
+
+unsigned rsr_3() {
+  return __builtin_arm_rsr("cp0:1:c2"); //expected-error {{invalid special register for builtin}}
+}
+
+void *rsrp_3() {
+  return __builtin_arm_rsrp("cp0:1:c2"); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned long rsr64_3() {
+  return __builtin_arm_rsr64("cp0:1:c2");
+}
+
+unsigned rsr_4() {
+  return __builtin_arm_rsr("0:1:2:3:4"); //expected-error {{invalid special register for builtin}}
+}
+
+void *rsrp_4() {
+  return __builtin_arm_rsrp("0:1:2:3:4"); //expected-error {{invalid special register for builtin}}
+}
+
+unsigned long rsr64_4() {
+  return __builtin_arm_rsr64("0:1:2"); //expected-error {{invalid special register for builtin}}
+}
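
Usage sketch (illustrative only, not part of the patch): the arm_acle.h wrappers added above take either a register name or an encoded register string. The strings below are the placeholder values used by the patch's own tests, not real registers; any string accepted by SemaBuiltinARMSpecialReg could be substituted.

  /* sketch.c - assumes a target compiler whose arm_acle.h provides the new
     ACLE 10.1 special register intrinsics added by this patch. */
  #include <stdint.h>
  #include <arm_acle.h>

  /* Named form: the name is carried through to the backend as metadata on
     llvm.read_register, so it must be a register the target recognises
     ("sysreg" here is only a placeholder, as in the tests above). */
  uint32_t read_named(void) {
    return __arm_rsr("sysreg");
  }

  /* Encoded form: Sema checks the field syntax and value ranges at compile
     time; the fields below are arbitrary in-range values. */
  uint32_t read_encoded(void) {
  #ifdef __ARM_32BIT_STATE
    return __arm_rsr("cp1:2:c3:c4:5");   /* cp<n>:<op1>:c<CRn>:c<CRm>:<op2> */
  #else
    return __arm_rsr("1:2:3:4:5");       /* o0:op1:CRn:CRm:op2, o0 is 0 or 1 */
  #endif
  }

  void write_encoded(uint32_t v) {
  #ifdef __ARM_32BIT_STATE
    __arm_wsr("cp1:2:c3:c4:5", v);
  #else
    __arm_wsr("1:2:3:4:5", v);
  #endif
  }

Reads lower to llvm.read_register and writes to llvm.write_register, with the register string attached as metadata, as the CodeGen tests above verify.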