diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def --- a/clang/include/clang/Basic/Builtins.def +++ b/clang/include/clang/Basic/Builtins.def @@ -486,9 +486,12 @@ BUILTIN(__builtin_memcmp, "ivC*vC*z", "nF") BUILTIN(__builtin_memcpy, "v*v*vC*z", "nF") BUILTIN(__builtin_memcpy_inline, "vv*vC*Iz", "nt") +BUILTIN(__builtin_overloaded_memcpy, "v*v*vC*z", "nt") BUILTIN(__builtin_memmove, "v*v*vC*z", "nF") +BUILTIN(__builtin_overloaded_memmove, "v*v*vC*z", "nt") BUILTIN(__builtin_mempcpy, "v*v*vC*z", "nF") BUILTIN(__builtin_memset, "v*v*iz", "nF") +BUILTIN(__builtin_overloaded_memset, "v*v*iz", "nt") BUILTIN(__builtin_printf, "icC*.", "Fp:0:") BUILTIN(__builtin_stpcpy, "c*c*cC*", "nF") BUILTIN(__builtin_stpncpy, "c*c*cC*z", "nF") diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -7935,9 +7935,6 @@ def err_atomic_op_needs_non_const_pointer : Error< "address argument to atomic operation must be a pointer to non-const " "type (%0 invalid)">; -def err_atomic_op_needs_trivial_copy : Error< - "address argument to atomic operation must be a pointer to a " - "trivially-copyable type (%0 invalid)">; def err_atomic_op_needs_atomic_int_or_ptr : Error< "address argument to atomic operation must be a pointer to %select{|atomic }0" "integer or pointer (%1 invalid)">; @@ -8899,6 +8896,16 @@ "null returned from %select{function|method}0 that requires a non-null return value">, InGroup; +def err_const_arg : Error<"argument must be non-const, got %0">; + +def err_arity_mismatch : Error<"too %select{few|many}0 arguments to function call, expected %1, have %2">; + +def err_argument_needs_trivial_copy : Error<"address argument must be a pointer to a trivially-copyable type (%0 invalid)">; + +def err_atomic_volatile_unsupported : Error<"mixing _Atomic and volatile qualifiers is 
unsupported (%select{%1|%1 and %2}0 cannot have both _Atomic and volatile)">; + +def err_atomic_sizes_must_match : Error<"_Atomic sizes must match, %0 is %1 bytes and %2 is %3 bytes">; + def err_lifetimebound_no_object_param : Error< "'lifetimebound' attribute cannot be applied; %select{static |non-}0member " "function has no implicit object parameter">; @@ -9663,8 +9670,6 @@ // OpenCL v2.0 s6.13.6 -- Builtin Pipe Functions def err_opencl_builtin_pipe_first_arg : Error< "first argument to %0 must be a pipe type">; -def err_opencl_builtin_pipe_arg_num : Error< - "invalid number of arguments to function: %0">; def err_opencl_builtin_pipe_invalid_arg : Error< "invalid argument type to function %0 (expecting %1 having %2)">; def err_opencl_builtin_pipe_invalid_access_modifier : Error< @@ -9691,8 +9696,6 @@ "cannot refer to a block inside block">; // OpenCL v2.0 s6.13.9 - Address space qualifier functions. -def err_opencl_builtin_to_addr_arg_num : Error< - "invalid number of arguments to function: %0">; def err_opencl_builtin_to_addr_invalid_arg : Error< "invalid argument %0 to function: %1, expecting a generic pointer argument">; diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h --- a/clang/lib/CodeGen/CGBuilder.h +++ b/clang/lib/CodeGen/CGBuilder.h @@ -279,6 +279,15 @@ IsVolatile); } + using CGBuilderBaseTy::CreateElementUnorderedAtomicMemCpy; + llvm::CallInst *CreateElementUnorderedAtomicMemCpy(Address Dest, Address Src, + llvm::Value *Size, + CharUnits ElementSize) { + return CreateElementUnorderedAtomicMemCpy( + Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(), + Src.getAlignment().getAsAlign(), Size, ElementSize.getQuantity()); + } + using CGBuilderBaseTy::CreateMemCpyInline; llvm::CallInst *CreateMemCpyInline(Address Dest, Address Src, uint64_t Size) { return CreateMemCpyInline( @@ -294,6 +303,15 @@ Size, IsVolatile); } + using CGBuilderBaseTy::CreateElementUnorderedAtomicMemMove; + llvm::CallInst 
*CreateElementUnorderedAtomicMemMove(Address Dest, Address Src, + llvm::Value *Size, + CharUnits ElementSize) { + return CreateElementUnorderedAtomicMemMove( + Dest.getPointer(), Dest.getAlignment().getAsAlign(), Src.getPointer(), + Src.getAlignment().getAsAlign(), Size, ElementSize.getQuantity()); + } + using CGBuilderBaseTy::CreateMemSet; llvm::CallInst *CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile = false) { @@ -301,6 +319,16 @@ Dest.getAlignment().getAsAlign(), IsVolatile); } + using CGBuilderBaseTy::CreateElementUnorderedAtomicMemSet; + llvm::CallInst *CreateElementUnorderedAtomicMemSet(Address Dest, + llvm::Value *Value, + llvm::Value *Size, + CharUnits ElementSize) { + return CreateElementUnorderedAtomicMemSet(Dest.getPointer(), Value, Size, + Dest.getAlignment().getAsAlign(), + ElementSize.getQuantity()); + } + using CGBuilderBaseTy::CreatePreserveStructAccessIndex; Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -624,6 +624,16 @@ return {Width, Signed}; } +static QualType getPtrArgType(CodeGenModule &CGM, const CallExpr *E, + unsigned ArgNo) { + QualType ArgTy = E->getArg(ArgNo)->IgnoreImpCasts()->getType(); + if (ArgTy->isArrayType()) + return CGM.getContext().getAsArrayType(ArgTy)->getElementType(); + if (ArgTy->isObjCObjectPointerType()) + return ArgTy->castAs<ObjCObjectPointerType>()->getPointeeType(); + return ArgTy->castAs<PointerType>()->getPointeeType(); +} + Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { llvm::Type *DestType = Int8PtrTy; if (ArgValue->getType() != DestType) @@ -2617,16 +2627,27 @@ } case Builtin::BImemcpy: case Builtin::BI__builtin_memcpy: + case Builtin::BI__builtin_overloaded_memcpy: case Builtin::BImempcpy: case Builtin::BI__builtin_mempcpy: { + QualType DestTy = getPtrArgType(CGM, E, 0); + QualType SrcTy = getPtrArgType(CGM, 
E, 1); Address Dest = EmitPointerWithAlignment(E->getArg(0)); Address Src = EmitPointerWithAlignment(E->getArg(1)); + bool isVolatile = + DestTy.isVolatileQualified() || SrcTy.isVolatileQualified(); + bool isAtomic = DestTy->isAtomicType() || SrcTy->isAtomicType(); Value *SizeVal = EmitScalarExpr(E->getArg(2)); + CharUnits ElementSize = CGM.getContext().getTypeSizeInChars(DestTy); EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, 0); EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD, 1); - Builder.CreateMemCpy(Dest, Src, SizeVal, false); + if (isAtomic) + Builder.CreateElementUnorderedAtomicMemCpy(Dest, Src, SizeVal, + ElementSize); + else + Builder.CreateMemCpy(Dest, Src, SizeVal, isVolatile); if (BuiltinID == Builtin::BImempcpy || BuiltinID == Builtin::BI__builtin_mempcpy) return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal)); @@ -2695,26 +2716,46 @@ } case Builtin::BImemmove: - case Builtin::BI__builtin_memmove: { + case Builtin::BI__builtin_memmove: + case Builtin::BI__builtin_overloaded_memmove: { + QualType DestTy = getPtrArgType(CGM, E, 0); + QualType SrcTy = getPtrArgType(CGM, E, 1); Address Dest = EmitPointerWithAlignment(E->getArg(0)); Address Src = EmitPointerWithAlignment(E->getArg(1)); + bool isVolatile = + DestTy.isVolatileQualified() || SrcTy.isVolatileQualified(); + bool isAtomic = DestTy->isAtomicType() || SrcTy->isAtomicType(); Value *SizeVal = EmitScalarExpr(E->getArg(2)); + CharUnits ElementSize = CGM.getContext().getTypeSizeInChars(DestTy); EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, 0); EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD, 1); - Builder.CreateMemMove(Dest, Src, SizeVal, false); + if (isAtomic) + Builder.CreateElementUnorderedAtomicMemMove(Dest, Src, SizeVal, + ElementSize); + 
else + Builder.CreateMemMove(Dest, Src, SizeVal, isVolatile); return RValue::get(Dest.getPointer()); } case Builtin::BImemset: - case Builtin::BI__builtin_memset: { + case Builtin::BI__builtin_memset: + case Builtin::BI__builtin_overloaded_memset: { + QualType DestTy = getPtrArgType(CGM, E, 0); Address Dest = EmitPointerWithAlignment(E->getArg(0)); Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty()); + bool isVolatile = DestTy.isVolatileQualified(); + bool isAtomic = DestTy->isAtomicType(); Value *SizeVal = EmitScalarExpr(E->getArg(2)); + CharUnits ElementSize = CGM.getContext().getTypeSizeInChars(DestTy); EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, 0); - Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); + if (isAtomic) + Builder.CreateElementUnorderedAtomicMemSet(Dest, ByteVal, SizeVal, + ElementSize); + else + Builder.CreateMemSet(Dest, ByteVal, SizeVal, isVolatile); return RValue::get(Dest.getPointer()); } case Builtin::BI__builtin___memset_chk: { diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -1061,13 +1061,13 @@ // LValue Expression Emission //===----------------------------------------------------------------------===// -/// EmitPointerWithAlignment - Given an expression of pointer type, try to -/// derive a more accurate bound on the alignment of the pointer. +/// Given an expression of pointer type, try to derive a more accurate bound on +/// the alignment of the pointer. Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) { // We allow this with ObjC object pointers because of fragile ABIs. 
- assert(E->getType()->isPointerType() || + assert(E->getType()->isPointerType() || E->getType()->isArrayType() || E->getType()->isObjCObjectPointerType()); E = E->IgnoreParens(); @@ -1164,6 +1164,9 @@ // TODO: conditional operators, comma. + if (E->getType()->isArrayType()) + return EmitArrayToPointerDecay(E, BaseInfo, TBAAInfo); + // Otherwise, use the alignment of the type. CharUnits Align = CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo); diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -1187,8 +1187,9 @@ return true; } break; default: - S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num) - << Call->getDirectCallee() << Call->getSourceRange(); + S.Diag(Call->getBeginLoc(), diag::err_arity_mismatch) + << (Call->getNumArgs() > 4) << "2 or 4" << Call->getNumArgs() + << Call->getSourceRange(); return true; } @@ -1274,8 +1275,9 @@ static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, CallExpr *Call) { if (Call->getNumArgs() != 1) { - S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num) - << Call->getDirectCallee() << Call->getSourceRange(); + S.Diag(Call->getBeginLoc(), diag::err_arity_mismatch) + << (Call->getNumArgs() > 1) << 1 << Call->getNumArgs() + << Call->getSourceRange(); return true; } @@ -1437,6 +1439,168 @@ CallExpr *TheCall) { ExprResult TheCallResult(TheCall); + enum class MemCheckType { Full, Basic }; + + auto CheckArityIs = [&](unsigned ExpectedArity) { + if (TheCall->getNumArgs() == ExpectedArity) + return true; + Diag(TheCall->getBeginLoc(), PDiag(diag::err_arity_mismatch)) + << (TheCall->getNumArgs() > ExpectedArity) << ExpectedArity + << TheCall->getNumArgs() << TheCall->getSourceRange(); + return false; + }; + auto getPointeeOrArrayType = [&](clang::Expr *E) { + if (E->getType()->isArrayType()) + return Context.getAsArrayType(E->getType())->getElementType(); + return 
E->getType()->getPointeeType(); + }; + auto CheckIsObjectPointerOrArrayType = [&](clang::Expr *E) { + if (E->getType()->isObjectPointerType()) + return true; + // Arrays can decay to a pointer. + if (E->getType()->isArrayType()) + return true; + // Null values are convertible to a pointer. + Expr::EvalResult Result; + if (E->getType()->isIntegerType() && !E->isValueDependent() && + E->EvaluateAsInt(Result, Context) && (Result.Val.getInt() == 0)) + return true; + if (E->getType()->isNullPtrType()) + return true; + Diag(TheCall->getBeginLoc(), diag::err_init_conversion_failed) + << InitializedEntity::EK_Parameter << Context.VoidPtrTy << E->isLValue() + << E->getType() << 0 << E->getSourceRange(); + return false; + }; + auto CheckIsConstUnqualifiedPointerType = [&](clang::Expr *E) { + if (!E->getType()->isObjectPointerType() && !E->getType()->isArrayType()) + return true; + QualType ElTy = getPointeeOrArrayType(E); + if (!ElTy.isConstQualified()) + return true; + Diag(TheCall->getBeginLoc(), PDiag(diag::err_const_arg)) + << ElTy << E->getSourceRange(); + return false; + }; + auto CheckIsIntegerType = [&](clang::Expr *E) { + if (E->getType()->isIntegerType()) + return true; + Diag(TheCall->getBeginLoc(), diag::err_init_conversion_failed) + << InitializedEntity::EK_Parameter << Context.getSizeType() + << E->isLValue() << E->getType() << 0 << E->getSourceRange(); + return false; + }; + auto CheckIsTriviallyCopyablePointeeType = [&](clang::Expr *E) { + if (!(E->getType()->isPointerType() || E->getType()->isArrayType())) + return true; + QualType ElTy = getPointeeOrArrayType(E); + if (ElTy.getUnqualifiedType()->isVoidType()) + return true; + if (ElTy.isTriviallyCopyableType(Context)) + return true; + if (ElTy->isAtomicType()) + return true; + Diag(TheCall->getBeginLoc(), PDiag(diag::err_argument_needs_trivial_copy)) + << ElTy << E->getSourceRange(); + return false; + }; + auto CheckNotAtomicVolatilePointeeTypes = [&](clang::Expr *E0, + clang::Expr *E1) { + if 
(!E0->getType()->isObjectPointerType() && !E0->getType()->isArrayType()) + return true; + if (!E1->getType()->isObjectPointerType() && !E1->getType()->isArrayType()) + return true; + QualType El0Ty = getPointeeOrArrayType(E0); + QualType El1Ty = getPointeeOrArrayType(E1); + bool isVolatile = + El0Ty.isVolatileQualified() || El1Ty.isVolatileQualified(); + bool isAtomic = El0Ty->isAtomicType() || El1Ty->isAtomicType(); + if (!(isAtomic && isVolatile)) + return true; + Diag(TheCall->getBeginLoc(), PDiag(diag::err_atomic_volatile_unsupported)) + << (E0 != E1) << El0Ty << El1Ty << E0->getSourceRange() + << E1->getSourceRange(); + return false; + }; + auto CheckAtomicPointeeTypesCompatible = [&](clang::Expr *E0, + clang::Expr *E1) { + if (!E0->getType()->isObjectPointerType() && !E0->getType()->isArrayType()) + return true; + if (!E1->getType()->isObjectPointerType() && !E1->getType()->isArrayType()) + return true; + QualType El0Ty = getPointeeOrArrayType(E0); + QualType El1Ty = getPointeeOrArrayType(E1); + if (!(El0Ty->isAtomicType() || El1Ty->isAtomicType())) + return true; + if (El0Ty.getUnqualifiedType()->isVoidType() || + El1Ty.getUnqualifiedType()->isVoidType()) + return true; + CharUnits El0Sz = Context.getTypeSizeInChars(El0Ty); + CharUnits El1Sz = Context.getTypeSizeInChars(El1Ty); + if (El0Sz == El1Sz) + return true; + Diag(TheCall->getBeginLoc(), PDiag(diag::err_atomic_sizes_must_match)) + << El0Ty << (unsigned)El0Sz.getQuantity() << El1Ty + << (unsigned)El1Sz.getQuantity() << E0->getSourceRange() + << E1->getSourceRange(); + return false; + }; + auto InvokeIfKnownNonZero = [&](clang::Expr *E, auto InvokeMaybe) { + if (E->isValueDependent()) + return true; + bool IsNonZero; + if (!E->EvaluateAsBooleanCondition(IsNonZero, Context)) + return true; + if (IsNonZero) { + InvokeMaybe(); + return false; + } + return true; + }; + auto CheckMemcpy = [&](MemCheckType checkType) { + if (!CheckArityIs(3)) + return; + clang::Expr *DstOp = TheCall->getArg(0); + clang::Expr 
*SrcOp = TheCall->getArg(1); + clang::Expr *SizeOp = TheCall->getArg(2); + if (checkType == MemCheckType::Full) { + // This intrinsic needs to be fully type checked because it used the 't' + // metadata in Builtins.def. + if (!CheckIsObjectPointerOrArrayType(DstOp) || + !CheckIsObjectPointerOrArrayType(SrcOp) || + !CheckIsConstUnqualifiedPointerType(DstOp) || + !CheckIsIntegerType(SizeOp) || + !CheckIsTriviallyCopyablePointeeType(DstOp) || + !CheckIsTriviallyCopyablePointeeType(SrcOp) || + !CheckNotAtomicVolatilePointeeTypes(DstOp, SrcOp) || + !CheckAtomicPointeeTypesCompatible(DstOp, SrcOp)) + return; + } + // Warn about copying to or from `nullptr` pointers when `size` is known to + // be non-zero. + if (!InvokeIfKnownNonZero(SizeOp, [&]() { + CheckNonNullArgument(*this, DstOp, TheCall->getExprLoc()); + CheckNonNullArgument(*this, SrcOp, TheCall->getExprLoc()); + })) + return; + }; + auto CheckMemset = [&]() { + if (!CheckArityIs(3)) + return; + clang::Expr *DstOp = TheCall->getArg(0); + clang::Expr *ValOp = TheCall->getArg(1); + clang::Expr *SizeOp = TheCall->getArg(2); + if (!CheckIsObjectPointerOrArrayType(DstOp) || + !CheckIsConstUnqualifiedPointerType(DstOp) || + !CheckIsIntegerType(ValOp) || !CheckIsIntegerType(SizeOp) || + !CheckIsTriviallyCopyablePointeeType(DstOp) || + !CheckNotAtomicVolatilePointeeTypes(DstOp, DstOp) || + !InvokeIfKnownNonZero(SizeOp, [&]() { + CheckNonNullArgument(*this, DstOp, TheCall->getExprLoc()); + })) + return; + }; + // Find out if any arguments are required to be integer constant expressions. unsigned ICEArguments = 0; ASTContext::GetBuiltinTypeError Error; @@ -1701,19 +1865,16 @@ case Builtin::BI__builtin_nontemporal_load: case Builtin::BI__builtin_nontemporal_store: return SemaBuiltinNontemporalOverloaded(TheCallResult); - case Builtin::BI__builtin_memcpy_inline: { - clang::Expr *SizeOp = TheCall->getArg(2); - // We warn about copying to or from `nullptr` pointers when `size` is - // greater than 0. 
When `size` is value dependent we cannot evaluate its - // value so we bail out. - if (SizeOp->isValueDependent()) - break; - if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { - CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); - CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); - } + case Builtin::BI__builtin_memcpy_inline: + CheckMemcpy(MemCheckType::Basic); + break; + case Builtin::BI__builtin_overloaded_memcpy: + case Builtin::BI__builtin_overloaded_memmove: + CheckMemcpy(MemCheckType::Full); + break; + case Builtin::BI__builtin_overloaded_memset: + CheckMemset(); break; - } #define BUILTIN(ID, TYPE, ATTRS) #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ case Builtin::BI##ID: \ @@ -4662,7 +4823,7 @@ !AtomTy->isScalarType()) { // For GNU atomics, require a trivially-copyable type. This is not part of // the GNU atomics specification, but we enforce it for sanity. - Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) + Diag(ExprRange.getBegin(), diag::err_argument_needs_trivial_copy) << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } diff --git a/clang/test/CodeGen/builtin-overloaded-memfns.c b/clang/test/CodeGen/builtin-overloaded-memfns.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/builtin-overloaded-memfns.c @@ -0,0 +1,129 @@ +// RUN: %clang_cc1 -triple arm64-unknown-unknown -fms-extensions -emit-llvm < %s| FileCheck %s + +typedef __SIZE_TYPE__ size_t; + +// CHECK-LABEL: volatile_dst_cpy_void( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true) +void volatile_dst_cpy_void(volatile void *dst, const void *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: volatile_src_cpy_void( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true) +void volatile_src_cpy_void(void *dst, volatile const void *src, 
size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: volatile_dstsrc_cpy_void( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true) +void volatile_dstsrc_cpy_void(volatile void *dst, volatile const void *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: volatile_dst_cpy_char( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true) +void volatile_dst_cpy_char(volatile char *dst, const char *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: volatile_dst_cpy_int( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 true) +void volatile_dst_cpy_int(volatile int *dst, const int *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: unaligned_dst_cpy_int( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false) +void unaligned_dst_cpy_int(__unaligned int *dst, const int *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: unaligned_src_cpy_int( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false) +void unaligned_src_cpy_int(int *dst, __unaligned const int *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: addrspace_srcdst_cpy_char( +// CHECK: call void @llvm.memcpy.p32i8.p32i8.i64(i8 addrspace(32)* align 1 %{{[0-9]*}}, i8 addrspace(32)* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false) +void addrspace_srcdst_cpy_char(__attribute__((address_space(32))) char *dst, __attribute__((address_space(32))) const char *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: restrict_srcdst_cpy_char( +// CHECK: call 
void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false) +void restrict_srcdst_cpy_char(char *__restrict dst, const char *__restrict src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: atomic_srcdst_cpy_char( +// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 1) +void atomic_srcdst_cpy_char(_Atomic char *dst, _Atomic const char *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: atomic_srcdst_cpy_int( +// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 4) +void atomic_srcdst_cpy_int(_Atomic int *dst, _Atomic const int *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: atomic_srcdst_cpy_longlong( +// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 8 %{{[0-9]*}}, i8* align 8 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 8) +void atomic_srcdst_cpy_longlong(_Atomic long long *dst, _Atomic const long long *src, size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +// CHECK-LABEL: atomic_static_srcdst_cpy_char( +// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i32 1) +void atomic_static_srcdst_cpy_char(_Atomic char dst[static 2], _Atomic const char src[2], size_t size) { __builtin_overloaded_memcpy(dst, src, size); } + +extern _Atomic char dst_atomic[2]; +extern _Atomic const char src_atomic[2]; + +// CHECK-LABEL: atomic_array_srcdst_cpy_char( +// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i32 1) +void atomic_array_srcdst_cpy_char(size_t size) { __builtin_overloaded_memcpy(dst_atomic, src_atomic, size); } + 
+// CHECK-LABEL: atomic_local_srcdst_cpy_char( +// CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 4 %{{[0-9]*}}, i8* align 4 %{{[0-9]*}}, i64 4, i32 4) +void atomic_local_srcdst_cpy_char(size_t size) { + _Atomic int dst; + _Atomic const int src; + __builtin_overloaded_memcpy(&dst, &src, sizeof(dst)); +} + +// CHECK-LABEL: vla_srcdst_cpy_char( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9a-z]*}}, i8* align 1 %{{[0-9a-z]*}}, i64 %{{[0-9]*}}, i1 true) +void vla_srcdst_cpy_char(size_t size) { + volatile char dst[size]; + const volatile char src[size]; + __builtin_overloaded_memcpy(dst, src, size); +} + +// CHECK-LABEL: static_srcdst_cpy_char( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %{{[0-9]*}}, i8* align 1 %{{[0-9]*}}, i64 %{{[0-9]*}}, i1 false) +void static_srcdst_cpy_char(char dst[static 42], const char src[static 42], size_t size) { + __builtin_overloaded_memcpy(dst, src, size); +} + +extern char dst_unsized[]; +extern volatile char dst_vunsized[]; +extern const char src_cunsized[]; +extern const volatile char src_cvunsized[]; + +// CHECK-LABEL: array_volatile_unsized_dst_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void array_volatile_unsized_dst_cpy(size_t size) { __builtin_overloaded_memcpy(dst_vunsized, src_cunsized, size); } + +// CHECK-LABEL: array_volatile_unsized_src_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void array_volatile_unsized_src_cpy(size_t size) { __builtin_overloaded_memcpy(dst_unsized, src_cvunsized, size); } + +// CHECK-LABEL: array_volatile_unsized_dstsrc_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr {{.*}}, i8* align 1 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void array_volatile_unsized_dstsrc_cpy(size_t size) { 
__builtin_overloaded_memcpy(dst_vunsized, src_cvunsized, size); } + +extern __attribute__((aligned(128))) char dst_512[512]; +extern __attribute__((aligned(128))) volatile char dst_v512[512]; +extern __attribute__((aligned(128))) const char src_c512[512]; +extern __attribute__((aligned(128))) const volatile char src_cv512[512]; + +// CHECK-LABEL: array_volatile_dst_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void array_volatile_dst_cpy(size_t size) { __builtin_overloaded_memcpy(dst_v512, src_c512, size); } + +// CHECK-LABEL: array_volatile_src_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void array_volatile_src_cpy(size_t size) { __builtin_overloaded_memcpy(dst_512, src_cv512, size); } + +// CHECK-LABEL: array_volatile_dstsrc_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void array_volatile_dstsrc_cpy(size_t size) { __builtin_overloaded_memcpy(dst_v512, src_cv512, size); } + +extern __attribute__((aligned(128))) volatile char dst_v512_32[512][32]; +extern __attribute__((aligned(128))) const volatile char src_cv512_32[512][32]; + +// CHECK-LABEL: multiarray_volatile_dstsrc_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 getelementptr {{.*}}, i8* align 128 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void multiarray_volatile_dstsrc_cpy(size_t size) { __builtin_overloaded_memcpy(dst_v512_32, src_cv512_32, size); } + +// CHECK-LABEL: multiarray_idx_volatile_dstsrc_cpy( +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 getelementptr {{.*}}, i8* align 32 getelementptr {{.*}}, i64 %{{[0-9]*}}, i1 true) +void multiarray_idx_volatile_dstsrc_cpy(size_t size) { __builtin_overloaded_memcpy(dst_v512_32[1], src_cv512_32[1], size); 
} diff --git a/clang/test/CodeGenObjC/builtin-memfns.m b/clang/test/CodeGenObjC/builtin-memfns.m --- a/clang/test/CodeGenObjC/builtin-memfns.m +++ b/clang/test/CodeGenObjC/builtin-memfns.m @@ -1,10 +1,38 @@ // RUN: %clang_cc1 -triple x86_64-apple-macosx10.8.0 -emit-llvm -o - %s | FileCheck %s -void *memcpy(void *restrict s1, const void *restrict s2, unsigned long n); +typedef __SIZE_TYPE__ size_t; + +void *memcpy(void *restrict s1, const void *restrict s2, size_t n); +void *memmove(void *restrict s1, const void *restrict s2, size_t n); +void *memset(void *s1, int v, size_t n); // PR13697 -void test1(int *a, id b) { - // CHECK: @test1 +void cpy1(int *a, id b) { + // CHECK-LABEL: @cpy1( + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false) + memcpy(a, b, 8); +} + +void cpy2(id a, int *b) { + // CHECK-LABEL: @cpy2( // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false) memcpy(a, b, 8); } + +void move1(int *a, id b) { + // CHECK-LABEL: @move1( + // CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false) + memmove(a, b, 8); +} + +void move2(id a, int *b) { + // CHECK-LABEL: @move2( + // CHECK: call void @llvm.memmove.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i1 false) + memmove(a, b, 8); +} + +void set(id a) { + // CHECK-LABEL: @set( + // CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 42, i64 8, i1 false) + memset(a, 42, 8); +} diff --git a/clang/test/Sema/builtin-overloaded-memfns.cpp b/clang/test/Sema/builtin-overloaded-memfns.cpp new file mode 100644 --- /dev/null +++ b/clang/test/Sema/builtin-overloaded-memfns.cpp @@ -0,0 +1,158 @@ +// RUN: %clang_cc1 %s -verify -fsyntax-only -triple=arm64-unknown-unknown -fms-extensions -DCPY=1 +// RUN: %clang_cc1 %s -verify -fsyntax-only -triple=arm64-unknown-unknown -fms-extensions -DCPY=0 + +// Test memcpy and memmove with the same code, since they're basically the same constraints. +#if CPY +#define MEM(...) 
__builtin_overloaded_memcpy(__VA_ARGS__) +#else +#define MEM(...) __builtin_overloaded_memmove(__VA_ARGS__) +#endif + +#define NULL (void *)0 +#define nullptr __nullptr +using size_t = __SIZE_TYPE__; +struct Intish { + int i; +}; +struct TrivialCpy { + char buf[8]; + TrivialCpy(); + TrivialCpy(const TrivialCpy &) = default; +}; +struct NotTrivialCpy { + char buf[8]; + NotTrivialCpy(); + NotTrivialCpy(const NotTrivialCpy &); +}; + +void arg_count() { + MEM(); // expected-error {{too few arguments to function call, expected 3, have 0}} + MEM(0); // expected-error {{too few arguments to function call, expected 3, have 1}} + MEM(0, 0); // expected-error {{too few arguments to function call, expected 3, have 2}} + MEM(0, 0, 0, 0); // expected-error {{too many arguments to function call, expected 3, have 4}} + __builtin_overloaded_memset(); // expected-error {{too few arguments to function call, expected 3, have 0}} + __builtin_overloaded_memset(0); // expected-error {{too few arguments to function call, expected 3, have 1}} + __builtin_overloaded_memset(0, 0); // expected-error {{too few arguments to function call, expected 3, have 2}} + __builtin_overloaded_memset(0, 0, 0, 0); // expected-error {{too many arguments to function call, expected 3, have 4}} +} + +void null(char *dst, const char *src, size_t size) { + MEM(0, 0, 0); + MEM(0, 0, size); + __builtin_overloaded_memset(0, 0, 0); + __builtin_overloaded_memset(0, 0, size); + MEM(dst, 0, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + MEM(dst, 0, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + MEM(dst, NULL, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + MEM(dst, nullptr, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + MEM(0, src, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + MEM(NULL, src, 42); // 
expected-warning {{null passed to a callee that requires a non-null argument}} + MEM(nullptr, src, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + __builtin_overloaded_memset(0, 0, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + __builtin_overloaded_memset(NULL, 0, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} + __builtin_overloaded_memset(nullptr, 0, 42); // expected-warning {{null passed to a callee that requires a non-null argument}} +} + +void arg_types(char *dst, const char *src, size_t size) { + MEM(dst, src, 0); + MEM(dst, dst, ~(size_t)0); + MEM(dst, src, 42); + MEM(dst, src, size); + MEM(dst, (char *)src, size); + MEM(dst, (const void *)src, size); + MEM((void *)dst, src, size); + MEM(dst, (volatile const char *)src, size); + MEM((volatile char *)dst, src, size); + MEM(dst, (__unaligned const char *)src, size); + MEM((__unaligned char *)dst, src, size); + MEM(dst, (const char *__restrict)src, size); + MEM((char *__restrict)dst, src, size); + MEM(dst, (_Atomic const char *)src, size); + MEM((_Atomic char *)dst, src, size); + MEM((int *)dst, (_Atomic const Intish *)src, size); + MEM((_Atomic Intish *)dst, (const int *)src, size); + MEM((void *)dst, (_Atomic const int *)src, size); + MEM((_Atomic int *)dst, (const void *)src, size); + MEM(dst, (const __attribute__((address_space(32))) char *)src, size); + MEM((__attribute__((address_space(32))) char *)dst, src, size); + MEM((__attribute__((address_space(32))) char *)dst, (const __attribute__((address_space(64))) char *)src, size); + MEM(dst, (__attribute__((address_space(32))) __unaligned const volatile void *__restrict)src, size); + MEM((__attribute__((address_space(32))) __unaligned volatile void *__restrict)dst, src, size); + + __builtin_overloaded_memset(dst, 0, 0); + __builtin_overloaded_memset(dst, 0, ~(size_t)0); + __builtin_overloaded_memset(dst, 0, 42); + 
__builtin_overloaded_memset(dst, 0, size); + __builtin_overloaded_memset((void *)dst, 0, size); + __builtin_overloaded_memset((volatile char *)dst, 0, size); + __builtin_overloaded_memset((__unaligned char *)dst, 0, size); + __builtin_overloaded_memset((_Atomic char *)dst, 0, size); + __builtin_overloaded_memset((int *)dst, 0, size); + __builtin_overloaded_memset((_Atomic Intish *)dst, 0, size); + __builtin_overloaded_memset((__attribute__((address_space(32))) char *)dst, 0, size); + __builtin_overloaded_memset((__attribute__((address_space(32))) __unaligned volatile void *)dst, 0, size); + + MEM(dst, 42, size); // expected-error {{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}} + MEM(42, src, size); // expected-error {{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}} + MEM(dst, src, dst); // expected-error {{cannot initialize a parameter of type 'unsigned long' with an lvalue of type 'char *'}} + MEM((const char *)dst, src, size); // expected-error {{argument must be non-const, got 'const char'}} + MEM((__attribute__((address_space(32))) __unaligned const volatile char *)dst, src, size); // expected-error {{argument must be non-const, got 'const volatile __unaligned __attribute__((address_space(32))) char'}} + MEM(dst, (volatile _Atomic const char *)src, size); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('char' and 'const volatile _Atomic(char)' cannot have both _Atomic and volatile)}} + MEM((volatile _Atomic char *)dst, src, size); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' and 'const char' cannot have both _Atomic and volatile)}} + MEM((volatile _Atomic char *)dst, (_Atomic const char *)src, size); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' and 'const _Atomic(char)' cannot have both _Atomic and volatile)}} + MEM((_Atomic char *)dst, (volatile _Atomic const char 
*)src, size); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('_Atomic(char)' and 'const volatile _Atomic(char)' cannot have both _Atomic and volatile)}} + MEM(dst, (_Atomic const int *)src, size); // expected-error{{_Atomic sizes must match, 'char' is 1 bytes and 'const _Atomic(int)' is 4 bytes}} + MEM((_Atomic int *)dst, src, size); // expected-error{{_Atomic sizes must match, '_Atomic(int)' is 4 bytes and 'const char' is 1 bytes}} + + __builtin_overloaded_memset(42, 0, size); // expected-error {{cannot initialize a parameter of type 'void *' with an rvalue of type 'int'}} + __builtin_overloaded_memset((const char *)dst, 0, size); // expected-error {{argument must be non-const, got 'const char'}} + __builtin_overloaded_memset((__attribute__((address_space(32))) __unaligned const volatile char *)dst, 0, size); // expected-error {{argument must be non-const, got 'const volatile __unaligned __attribute__((address_space(32))) char'}} + __builtin_overloaded_memset((volatile _Atomic char *)dst, 0, size); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' cannot have both _Atomic and volatile)}} + + extern char adst[512]; + extern volatile char avdst[512]; + extern const char asrc[512]; + extern const volatile char avsrc[512]; + + MEM(adst, asrc, sizeof(adst)); + MEM(avdst, avsrc, sizeof(avdst)); + MEM(asrc, asrc, sizeof(adst)); // expected-error {{argument must be non-const, got 'const char'}} + MEM(adst, asrc, sizeof(adst) + 1); // TODO diagnose size overflow? + __builtin_overloaded_memset(adst, 0, sizeof(adst)); + __builtin_overloaded_memset(avdst, 0, sizeof(avdst)); + __builtin_overloaded_memset(asrc, 0, sizeof(asrc)); // expected-error {{argument must be non-const, got 'const char'}} + __builtin_overloaded_memset(adst, 0, sizeof(adst) + 1); // TODO diagnose size overflow? 
+ + extern _Atomic char aadst[512]; + extern volatile _Atomic char aavdst[512]; + extern const _Atomic char aasrc[512]; + extern const _Atomic volatile char aavsrc[512]; + + MEM(aadst, aasrc, sizeof(aadst)); + MEM(aavdst, aasrc, sizeof(aadst)); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' and 'const _Atomic(char)' cannot have both _Atomic and volatile)}} + MEM(aadst, aavsrc, sizeof(aadst)); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('_Atomic(char)' and 'const volatile _Atomic(char)' cannot have both _Atomic and volatile)}} + __builtin_overloaded_memset(aadst, 0, sizeof(aadst)); + __builtin_overloaded_memset(aavdst, 0, sizeof(aavdst)); // expected-error{{mixing _Atomic and volatile qualifiers is unsupported ('volatile _Atomic(char)' cannot have both _Atomic and volatile)}} + + TrivialCpy trivialDst; + const TrivialCpy trivialSrc; + MEM(&trivialDst, &trivialSrc, sizeof(TrivialCpy)); + MEM((__attribute__((address_space(32))) __unaligned volatile TrivialCpy * __restrict) & trivialDst, (__attribute__((address_space(64))) __unaligned const volatile TrivialCpy *__restrict) & trivialSrc, sizeof(TrivialCpy)); + __builtin_overloaded_memset(&trivialDst, 0, sizeof(trivialDst)); + __builtin_overloaded_memset((__attribute__((address_space(32))) __unaligned volatile TrivialCpy * __restrict) & trivialDst, 0, sizeof(trivialDst)); + + TrivialCpy trivialDstArr[2]; + const TrivialCpy trivialSrcArr[2]; + MEM(trivialDstArr, trivialSrcArr, sizeof(TrivialCpy) * 2); + __builtin_overloaded_memset(trivialDstArr, 0, sizeof(TrivialCpy) * 2); + + NotTrivialCpy notTrivialDst; + const NotTrivialCpy notTrivialSrc; + MEM(&notTrivialDst, &notTrivialSrc, sizeof(NotTrivialCpy)); // expected-error{{address argument must be a pointer to a trivially-copyable type ('NotTrivialCpy' invalid)}} + __builtin_overloaded_memset(&notTrivialDst, 0, sizeof(NotTrivialCpy)); // expected-error{{address argument must be a pointer to a
trivially-copyable type ('NotTrivialCpy' invalid)}} + + NotTrivialCpy notTrivialDstArr[2]; + const NotTrivialCpy notTrivialSrcArr[2]; + MEM(notTrivialDstArr, notTrivialSrcArr, sizeof(NotTrivialCpy) * 2); // expected-error{{address argument must be a pointer to a trivially-copyable type ('NotTrivialCpy' invalid)}} + __builtin_overloaded_memset(notTrivialDstArr, 0, sizeof(NotTrivialCpy) * 2); // expected-error{{address argument must be a pointer to a trivially-copyable type ('NotTrivialCpy' invalid)}} +} diff --git a/clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl b/clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl --- a/clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl +++ b/clang/test/SemaOpenCL/invalid-pipe-builtin-cl2.0.cl @@ -10,7 +10,7 @@ read_pipe(p, &tmp); read_pipe(p, ptr); read_pipe(tmp, p); // expected-error {{first argument to 'read_pipe' must be a pipe type}} - read_pipe(p); // expected-error {{invalid number of arguments to function: 'read_pipe'}} + read_pipe(p); // expected-error {{too few arguments to function call, expected 2 or 4, have 1}} read_pipe(p, rid, tmp, ptr); read_pipe(p, tmp, tmp, ptr); // expected-error {{invalid argument type to function 'read_pipe' (expecting 'reserve_id_t' having '__private int')}} read_pipe(p, rid, rid, ptr); // expected-error {{invalid argument type to function 'read_pipe' (expecting 'unsigned int' having '__private reserve_id_t')}} @@ -39,7 +39,7 @@ write_pipe(p, &tmp); write_pipe(p, ptr); write_pipe(tmp, p); // expected-error {{first argument to 'write_pipe' must be a pipe type}} - write_pipe(p); // expected-error {{invalid number of arguments to function: 'write_pipe'}} + write_pipe(p); // expected-error {{too few arguments to function call, expected 2 or 4, have 1}} write_pipe(p, rid, tmp, ptr); write_pipe(p, tmp, tmp, ptr); // expected-error {{invalid argument type to function 'write_pipe' (expecting 'reserve_id_t' having '__private int')}} write_pipe(p, rid, rid, ptr); // expected-error {{invalid 
argument type to function 'write_pipe' (expecting 'unsigned int' having '__private reserve_id_t')}} diff --git a/clang/test/SemaOpenCL/to_addr_builtin.cl b/clang/test/SemaOpenCL/to_addr_builtin.cl --- a/clang/test/SemaOpenCL/to_addr_builtin.cl +++ b/clang/test/SemaOpenCL/to_addr_builtin.cl @@ -15,7 +15,7 @@ // expected-error@-2{{implicit declaration of function 'to_global' is invalid in OpenCL}} // expected-warning@-3{{incompatible integer to pointer conversion assigning to '__global int *__private' from 'int'}} #else - // expected-error@-5{{invalid number of arguments to function: 'to_global'}} + // expected-error@-5{{too many arguments to function call, expected 1, have 2}} #endif int x;