Index: include/clang/Basic/Attr.td
===================================================================
--- include/clang/Basic/Attr.td
+++ include/clang/Basic/Attr.td
@@ -745,6 +745,15 @@
   let Documentation = [Undocumented];
 }
 
+def AllocSize : InheritableAttr {
+  let Spellings = [GCC<"alloc_size">];
+  let Subjects = SubjectList<[HasFunctionProto], WarnDiag,
+                             "ExpectedFunctionWithProtoType">;
+  let Args = [IntArgument<"ElemSizeParam">, IntArgument<"NumElemsParam", 1>];
+  let TemplateDependent = 1;
+  let Documentation = [AllocSizeDocs];
+}
+
 def EnableIf : InheritableAttr {
   let Spellings = [GNU<"enable_if">];
   let Subjects = SubjectList<[Function]>;
Index: include/clang/Basic/AttrDocs.td
===================================================================
--- include/clang/Basic/AttrDocs.td
+++ include/clang/Basic/AttrDocs.td
@@ -178,6 +178,44 @@
   }];
 }
 
+def AllocSizeDocs : Documentation {
+  let Category = DocCatFunction;
+  let Content = [{
+The ``alloc_size`` attribute can be placed on functions that return pointers in
+order to hint to the compiler how many bytes of memory will be available at the
+returned pointer. ``alloc_size`` takes one or two arguments.
+
+- ``alloc_size(N)`` implies that argument number N equals the number of
+  available bytes at the returned pointer.
+- ``alloc_size(N, M)`` implies that the product of argument number N and
+  argument number M equals the number of available bytes at the returned
+  pointer.
+
+Argument numbers are 1-based.
+
+An example of how to use ``alloc_size``:
+
+.. code-block:: c
+
+  void *my_malloc(int a) __attribute__((alloc_size(1)));
+  void *my_calloc(int a, int b) __attribute__((alloc_size(1, 2)));
+
+  int main() {
+    void *const p = my_malloc(100);
+    assert(__builtin_object_size(p, 0) == 100);
+    void *const a = my_calloc(20, 5);
+    assert(__builtin_object_size(a, 0) == 100);
+  }
+
+.. Note:: This attribute works differently in clang than it does in GCC.
+  Specifically, clang will only trace ``const`` pointers (as above); we give up
+  on pointers that are not marked as ``const``. In the vast majority of cases,
+  this is unimportant, because LLVM has support for the ``alloc_size``
+  attribute. However, this may cause mildly unintuitive behavior when used with
+  other attributes, such as ``enable_if``.
+  }];
+}
+
 def EnableIfDocs : Documentation {
   let Category = DocCatFunction;
   let Content = [{
Index: include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- include/clang/Basic/DiagnosticSemaKinds.td
+++ include/clang/Basic/DiagnosticSemaKinds.td
@@ -2183,6 +2183,9 @@
   "%0 attribute only applies to%select{| constant}1 pointer arguments">,
   InGroup<IgnoredAttributes>;
 def err_attribute_pointers_only : Error<warn_attribute_pointers_only.Text>;
+def err_attribute_integers_only : Error<
+  "%0 attribute argument may only refer to a function parameter of integer "
+  "type">;
 def warn_attribute_return_pointers_only : Warning<
   "%0 attribute only applies to return values that are pointers">,
   InGroup<IgnoredAttributes>;
Index: lib/AST/ExprConstant.cpp
===================================================================
--- lib/AST/ExprConstant.cpp
+++ lib/AST/ExprConstant.cpp
@@ -110,19 +110,57 @@
     return getAsBaseOrMember(E).getInt();
   }
 
+  /// Given a CallExpr, try to get the alloc_size attribute. May return null.
+  static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
+    const FunctionDecl *Callee = CE->getDirectCallee();
+    return Callee ? Callee->getAttr<AllocSizeAttr>() : nullptr;
+  }
+
+  /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
+  /// This will look through a single cast.
+  ///
+  /// Returns null if we couldn't unwrap a function with alloc_size.
+  static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
+    if (!E->getType()->isPointerType())
+      return nullptr;
+
+    E = E->IgnoreParens();
+    // If we're doing a variable assignment from e.g. malloc(N), there will
+    // probably be a cast of some kind. Ignore it.
+    if (const auto *Cast = dyn_cast<CastExpr>(E))
+      E = Cast->getSubExpr()->IgnoreParens();
+
+    if (const auto *CE = dyn_cast<CallExpr>(E))
+      return getAllocSizeAttr(CE) ? CE : nullptr;
+    return nullptr;
+  }
+
+  /// Determines whether or not the given Base contains a call to a function
+  /// with the alloc_size attribute.
+  static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
+    const auto *E = Base.dyn_cast<const Expr *>();
+    return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
+  }
+
+  /// Determines if an LValue with the given LValueBase will have an unsized
+  /// array in its designator.
   /// Find the path length and type of the most-derived subobject in the given
   /// path, and find the size of the containing array, if any.
-  static
-  unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
-                                    ArrayRef<APValue::LValuePathEntry> Path,
-                                    uint64_t &ArraySize, QualType &Type,
-                                    bool &IsArray) {
+  static unsigned
+  findMostDerivedSubobject(ASTContext &Ctx, APValue::LValueBase Base,
+                           ArrayRef<APValue::LValuePathEntry> Path,
+                           uint64_t &ArraySize, QualType &Type, bool &IsArray) {
+    // This only accepts LValueBases from APValues, and APValues don't support
+    // arrays that lack size info.
+    assert(!isBaseAnAllocSizeCall(Base) &&
+           "Unsized arrays shouldn't appear here");
     unsigned MostDerivedLength = 0;
-    Type = Base;
+    Type = getType(Base);
+
     for (unsigned I = 0, N = Path.size(); I != N; ++I) {
       if (Type->isArrayType()) {
         const ConstantArrayType *CAT =
-          cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+            cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
         Type = CAT->getElementType();
         ArraySize = CAT->getSize().getZExtValue();
         MostDerivedLength = I + 1;
@@ -163,17 +201,23 @@
     /// Is this a pointer one past the end of an object?
     bool IsOnePastTheEnd : 1;
 
+    /// Indicator of whether the first entry is an unsized array.
+    bool FirstEntryIsAnUnsizedArray : 1;
+
     /// Indicator of whether the most-derived object is an array element.
     bool MostDerivedIsArrayElement : 1;
 
     /// The length of the path to the most-derived object of which this is a
     /// subobject.
-    unsigned MostDerivedPathLength : 29;
+    unsigned MostDerivedPathLength : 28;
 
     /// The size of the array of which the most-derived object is an element.
     /// This will always be 0 if the most-derived object is not an array
     /// element. 0 is not an indicator of whether or not the most-derived object
     /// is an array, however, because 0-length arrays are allowed.
+    ///
+    /// If the current array is an unsized array, the value of this is
+    /// undefined.
     uint64_t MostDerivedArraySize;
 
     /// The type of the most derived object referred to by this address.
@@ -188,23 +232,24 @@ explicit SubobjectDesignator(QualType T) : Invalid(false), IsOnePastTheEnd(false), - MostDerivedIsArrayElement(false), MostDerivedPathLength(0), - MostDerivedArraySize(0), MostDerivedType(T) {} + FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false), + MostDerivedPathLength(0), MostDerivedArraySize(0), + MostDerivedType(T) {} SubobjectDesignator(ASTContext &Ctx, const APValue &V) : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false), - MostDerivedIsArrayElement(false), MostDerivedPathLength(0), - MostDerivedArraySize(0) { + FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false), + MostDerivedPathLength(0), MostDerivedArraySize(0) { + assert(V.isLValue() && "Non-LValue used to make an LValue designator?"); if (!Invalid) { IsOnePastTheEnd = V.isLValueOnePastTheEnd(); ArrayRef VEntries = V.getLValuePath(); Entries.insert(Entries.end(), VEntries.begin(), VEntries.end()); if (V.getLValueBase()) { bool IsArray = false; - MostDerivedPathLength = - findMostDerivedSubobject(Ctx, getType(V.getLValueBase()), - V.getLValuePath(), MostDerivedArraySize, - MostDerivedType, IsArray); + MostDerivedPathLength = findMostDerivedSubobject( + Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize, + MostDerivedType, IsArray); MostDerivedIsArrayElement = IsArray; } } @@ -215,12 +260,25 @@ Entries.clear(); } + /// Determine whether the most derived subobject is an array without a + /// known bound. + bool isMostDerivedAnUnsizedArray() const { + return FirstEntryIsAnUnsizedArray && Entries.size() == 1; + } + + /// Determine what the most derived array's size is. Results in an assertion + /// failure if the most derived array lacks a size. + uint64_t getMostDerivedArraySize() const { + assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size"); + return MostDerivedArraySize; + } + /// Determine whether this is a one-past-the-end pointer. bool isOnePastTheEnd() const { assert(!Invalid); if (IsOnePastTheEnd) return true; - if (MostDerivedIsArrayElement && + if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement && Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize) return true; return false; @@ -248,6 +306,21 @@ MostDerivedArraySize = CAT->getSize().getZExtValue(); MostDerivedPathLength = Entries.size(); } + /// Update this designator to refer to the first element within the array of + /// elements of type T. This is an array of unknown size. + void addUnsizedArrayUnchecked(QualType ElemTy) { + PathEntry Entry; + Entry.ArrayIndex = 0; + Entries.push_back(Entry); + + MostDerivedType = ElemTy; + MostDerivedIsArrayElement = true; + // The value in MostDerivedArraySize is undefined in this case. So, set it + // to an arbitrary value that's likely to loudly break things if it's + // used. + MostDerivedArraySize = std::numeric_limits::max() / 2; + MostDerivedPathLength = Entries.size(); + } /// Update this designator to refer to the given base or member of this /// object. void addDeclUnchecked(const Decl *D, bool Virtual = false) { @@ -281,10 +354,16 @@ /// Add N to the address of this subobject. void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) { if (Invalid) return; + if (isMostDerivedAnUnsizedArray()) { + // Can't verify -- trust that the user is doing the right thing (or if + // not, trust that the caller will catch the bad behavior). 
+ Entries.back().ArrayIndex += N; + return; + } if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement) { Entries.back().ArrayIndex += N; - if (Entries.back().ArrayIndex > MostDerivedArraySize) { + if (Entries.back().ArrayIndex > getMostDerivedArraySize()) { diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex); setInvalid(); } @@ -518,9 +597,15 @@ /// gets a chance to look at it. EM_PotentialConstantExpressionUnevaluated, - /// Evaluate as a constant expression. Continue evaluating if we find a - /// MemberExpr with a base that can't be evaluated. - EM_DesignatorFold, + /// Evaluate as a constant expression. Continue evaluating if either: + /// - We find a MemberExpr with a base that can't be evaluated. + /// - We find a variable initialized with a call to a function that has + /// the alloc_size attribute on it. + /// In either case, the LValue returned shall have an invalid base; in the + /// former, the base will be the invalid MemberExpr, in the latter, the + /// base will be either the alloc_size CallExpr or a CastExpr wrapping + /// said CallExpr. + EM_OffsetFold, } EvalMode; /// Are we checking whether the expression is a potential constant @@ -622,7 +707,7 @@ case EM_PotentialConstantExpression: case EM_ConstantExpressionUnevaluated: case EM_PotentialConstantExpressionUnevaluated: - case EM_DesignatorFold: + case EM_OffsetFold: HasActiveDiagnostic = false; return OptionalDiagnostic(); } @@ -703,7 +788,7 @@ case EM_ConstantExpression: case EM_ConstantExpressionUnevaluated: case EM_ConstantFold: - case EM_DesignatorFold: + case EM_OffsetFold: return false; } llvm_unreachable("Missed EvalMode case"); @@ -722,7 +807,7 @@ case EM_EvaluateForOverflow: case EM_IgnoreSideEffects: case EM_ConstantFold: - case EM_DesignatorFold: + case EM_OffsetFold: return true; case EM_PotentialConstantExpression: @@ -758,15 +843,13 @@ case EM_ConstantExpressionUnevaluated: case EM_ConstantFold: case EM_IgnoreSideEffects: - case EM_DesignatorFold: + case EM_OffsetFold: return false; } llvm_unreachable("Missed EvalMode case"); } - bool allowInvalidBaseExpr() const { - return EvalMode == EM_DesignatorFold; - } + bool allowInvalidBaseExpr() const { return EvalMode == EM_OffsetFold; } }; /// Object used to treat all foldable expressions as constant expressions. @@ -802,11 +885,10 @@ struct FoldOffsetRAII { EvalInfo &Info; EvalInfo::EvaluationMode OldMode; - explicit FoldOffsetRAII(EvalInfo &Info, bool Subobject) + explicit FoldOffsetRAII(EvalInfo &Info) : Info(Info), OldMode(Info.EvalMode) { if (!Info.checkingPotentialConstantExpression()) - Info.EvalMode = Subobject ? EvalInfo::EM_DesignatorFold - : EvalInfo::EM_ConstantFold; + Info.EvalMode = EvalInfo::EM_OffsetFold; } ~FoldOffsetRAII() { Info.EvalMode = OldMode; } @@ -884,10 +966,12 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N) { + // If we're complaining, we must be able to statically determine the size of + // the most derived array. 
if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement) Info.CCEDiag(E, diag::note_constexpr_array_index) << static_cast(N) << /*array*/ 0 - << static_cast(MostDerivedArraySize); + << static_cast(getMostDerivedArraySize()); else Info.CCEDiag(E, diag::note_constexpr_array_index) << static_cast(N) << /*non-array*/ 1; @@ -1007,12 +1091,16 @@ void moveInto(APValue &V) const { if (Designator.Invalid) V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex); - else + else { + assert(!InvalidBase && "APValues can't handle invalid LValue bases"); + assert(!Designator.FirstEntryIsAnUnsizedArray && + "Unsized array with a valid base?"); V = APValue(Base, Offset, Designator.Entries, Designator.IsOnePastTheEnd, CallIndex); + } } void setFrom(ASTContext &Ctx, const APValue &V) { - assert(V.isLValue()); + assert(V.isLValue() && "Setting LValue from a non-LValue?"); Base = V.getLValueBase(); Offset = V.getLValueOffset(); InvalidBase = false; @@ -1021,6 +1109,15 @@ } void set(APValue::LValueBase B, unsigned I = 0, bool BInvalid = false) { +#ifndef NDEBUG + // We only allow a few types of invalid bases. Enforce that here. + if (BInvalid) { + const auto *E = B.get(); + assert((isa(E) || tryUnwrapAllocSizeCall(E)) && + "Unexpected type of invalid base"); + } +#endif + Base = B; Offset = CharUnits::Zero(); InvalidBase = BInvalid; @@ -1059,6 +1156,13 @@ if (checkSubobject(Info, E, isa(D) ? CSK_Field : CSK_Base)) Designator.addDeclUnchecked(D, Virtual); } + void addUnsizedArray(EvalInfo &Info, QualType ElemTy) { + assert(Designator.Entries.empty() && getType(Base)->isPointerType()); + assert(isBaseAnAllocSizeCall(Base) && + "Only alloc_size bases can have unsized arrays"); + Designator.FirstEntryIsAnUnsizedArray = true; + Designator.addUnsizedArrayUnchecked(ElemTy); + } void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) { if (checkSubobject(Info, E, CSK_ArrayToPointer)) Designator.addArrayUnchecked(CAT); @@ -2682,7 +2786,7 @@ // All the remaining cases only permit reading. Info.Diag(E, diag::note_constexpr_modify_global); return CompleteObject(); - } else if (VD->isConstexpr()) { + } else if (VD->isConstexpr() || BaseType.isConstQualified()) { // OK, we can read this variable. } else if (BaseType->isIntegralOrEnumerationType()) { if (!BaseType.isConstQualified()) { @@ -4892,6 +4996,105 @@ // Pointer Evaluation //===----------------------------------------------------------------------===// +/// \brief Attempts to compute the number of bytes available at the pointer +/// returned by a function with the alloc_size attribute. Returns true if we +/// were successful. +/// +/// This expects the given CallExpr to be a call to a function with an +/// alloc_size attribute. +static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx, + const CallExpr *Call, + llvm::APInt &Result) { + const AllocSizeAttr *AllocSize = getAllocSizeAttr(Call); + + // alloc_size args are 1-indexed, 0 means not present. 
+  assert(AllocSize && AllocSize->getElemSizeParam() != 0);
+  unsigned SizeArgNo = AllocSize->getElemSizeParam() - 1;
+  unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType());
+  if (Call->getNumArgs() <= SizeArgNo)
+    return false;
+
+  auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) {
+    if (!E->EvaluateAsInt(Into, Ctx, Expr::SE_AllowSideEffects))
+      return false;
+    if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
+      return false;
+    Into = Into.zextOrSelf(BitsInSizeT);
+    return true;
+  };
+
+  APSInt SizeOfElem;
+  if (!EvaluateAsSizeT(Call->getArg(SizeArgNo), SizeOfElem))
+    return false;
+
+  if (!AllocSize->getNumElemsParam()) {
+    Result = std::move(SizeOfElem);
+    return true;
+  }
+
+  APSInt NumberOfElems;
+  // Argument numbers start at 1
+  unsigned NumArgNo = AllocSize->getNumElemsParam() - 1;
+  if (!EvaluateAsSizeT(Call->getArg(NumArgNo), NumberOfElems))
+    return false;
+
+  bool Overflow;
+  llvm::APInt BytesAvailable = SizeOfElem.umul_ov(NumberOfElems, Overflow);
+  if (Overflow || BytesAvailable.isNegative())
+    return false;
+
+  Result = std::move(BytesAvailable);
+  return true;
+}
+
+/// \brief Convenience function. LVal's base must be a call to an alloc_size
+/// function.
+static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
+                                            const LValue &LVal,
+                                            llvm::APInt &Result) {
+  assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
+         "Can't get the size of a non alloc_size function");
+  const auto *Base = LVal.getLValueBase().get<const Expr *>();
+  const CallExpr *CE = tryUnwrapAllocSizeCall(Base);
+  return getBytesReturnedByAllocSizeCall(Ctx, CE, Result);
+}
+
+/// \brief Attempts to evaluate the given LValueBase as the result of a call to
+/// a function with the alloc_size attribute. If it was possible to do so, this
+/// function will return true, make Result's Base point to said function call,
+/// and mark Result's Base as invalid.
+static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
+                                      LValue &Result) {
+  if (!Info.allowInvalidBaseExpr() || Base.isNull())
+    return false;
+
+  // Because we do no form of static analysis, we only support const variables.
+  //
+  // Additionally, we can't support parameters, nor can we support static
+  // variables (in the latter case, use-before-assign isn't UB; in the former,
+  // we have no clue what they'll be assigned to).
+  const auto *VD =
+      dyn_cast_or_null<VarDecl>(Base.dyn_cast<const ValueDecl *>());
+  if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified())
+    return false;
+
+  const Expr *Init = VD->getAnyInitializer();
+  if (!Init)
+    return false;
+
+  const Expr *E = Init->IgnoreParens();
+  if (!tryUnwrapAllocSizeCall(E))
+    return false;
+
+  // Store E instead of E unwrapped so that the type of the LValue's base is
+  // what the user wanted.
+  Result.setInvalid(E);
+
+  QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType();
+  Result.addUnsizedArray(Info, Pointee);
+  return true;
+}
+
 namespace {
 class PointerExprEvaluator
   : public ExprEvaluatorBase<PointerExprEvaluator> {
@@ -5077,6 +5280,19 @@
 
   case CK_FunctionToPointerDecay:
     return EvaluateLValue(SubExpr, Result, Info);
+
+  case CK_LValueToRValue: {
+    LValue LVal;
+    if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
+      return false;
+
+    APValue RVal;
+    // Note, we use the subexpression's type in order to retain cv-qualifiers.
+    if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
+                                        LVal, RVal))
+      return evaluateLValueAsAllocSize(Info, LVal.Base, Result);
+    return Success(RVal, E);
+  }
   }
 
   return ExprEvaluatorBaseTy::VisitCastExpr(E);
@@ -5187,7 +5403,16 @@
     return true;
   }
   default:
-    return ExprEvaluatorBaseTy::VisitCallExpr(E);
+    if (ExprEvaluatorBaseTy::VisitCallExpr(E))
+      return true;
+    if (Info.allowInvalidBaseExpr() && getAllocSizeAttr(E)) {
+      Result.setInvalid(E);
+      QualType PointeeTy =
+          E->getType()->castAs<PointerType>()->getPointeeType();
+      Result.addUnsizedArray(Info, PointeeTy);
+      return true;
+    }
+    return false;
   }
 }
 
@@ -6175,8 +6400,6 @@
   bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
   bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
 
-private:
-  bool TryEvaluateBuiltinObjectSize(const CallExpr *E, unsigned Type);
   // FIXME: Missing: array subscript of vector, member of vector
 };
 } // end anonymous namespace
@@ -6448,7 +6671,7 @@
 }
 
 /// A more selective version of E->IgnoreParenCasts for
-/// TryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
+/// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
 /// to change the type of E.
 /// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
 ///
@@ -6508,38 +6731,50 @@
     }
   }
 
+  unsigned I = 0;
   QualType BaseType = getType(Base);
-  for (int I = 0, E = LVal.Designator.Entries.size(); I != E; ++I) {
+  if (LVal.Designator.FirstEntryIsAnUnsizedArray) {
+    assert(isBaseAnAllocSizeCall(Base) &&
+           "Unsized array in non-alloc_size call?");
+    // If this is an alloc_size base, we should ignore the initial array index
+    ++I;
+    BaseType = BaseType->castAs<PointerType>()->getPointeeType();
+  }
+
+  for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) {
+    const auto &Entry = LVal.Designator.Entries[I];
    if (BaseType->isArrayType()) {
      // Because __builtin_object_size treats arrays as objects, we can ignore
      // the index iff this is the last array in the Designator.
      if (I + 1 == E)
        return true;
-      auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
-      uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+      const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
+      uint64_t Index = Entry.ArrayIndex;
      if (Index + 1 != CAT->getSize())
        return false;
      BaseType = CAT->getElementType();
    } else if (BaseType->isAnyComplexType()) {
-      auto *CT = BaseType->castAs<ComplexType>();
-      uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+      const auto *CT = BaseType->castAs<ComplexType>();
+      uint64_t Index = Entry.ArrayIndex;
      if (Index != 1)
        return false;
      BaseType = CT->getElementType();
-    } else if (auto *FD = getAsField(LVal.Designator.Entries[I])) {
+    } else if (const FieldDecl *FD = getAsField(Entry)) {
      if (!IsLastFieldDecl(FD))
        return false;
      BaseType = FD->getType();
    } else {
-      assert(getAsBaseClass(LVal.Designator.Entries[I]) != nullptr &&
-             "Expecting cast to a base class");
+      assert(getAsBaseClass(Entry) && "Expecting cast to a base class");
      return false;
    }
  }
  return true;
 }
 
-/// Tests to see if the LValue has a designator (that isn't necessarily valid).
+/// Tests to see if the LValue has a user-specified designator (that isn't
+/// necessarily valid). Note that this always returns 'true' if the LValue has
+/// an unsized array as its first designator entry, because there's currently no
+/// way to tell if the user typed *foo or foo[0].
 static bool refersToCompleteObject(const LValue &LVal) {
   if (LVal.Designator.Invalid || !LVal.Designator.Entries.empty())
     return false;
@@ -6547,42 +6782,144 @@
   if (!LVal.InvalidBase)
     return true;
 
-  auto *E = LVal.Base.dyn_cast<const Expr *>();
-  (void)E;
-  assert(E != nullptr && isa<MemberExpr>(E));
-  return false;
+  // If `E` is a MemberExpr, then the first part of the designator is hiding in
+  // the LValueBase.
+  const auto *E = LVal.Base.dyn_cast<const Expr *>();
+  return !E || !isa<MemberExpr>(E);
+}
+
+/// Attempts to detect a user writing into a piece of memory that's impossible
+/// to figure out the size of by just using types.
+static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
+  const auto &Designator = LVal.Designator;
+  // Notes:
+  // - Users can only write off of the end when we have an invalid base. Invalid
+  //   bases imply we don't know where the memory came from.
+  // - This idiom is used most often with arrays of size 0 or 1. Giving up on
+  //   anything more than that can hurt accuracy a lot.
+  return LVal.InvalidBase &&
+         Designator.Entries.size() == Designator.MostDerivedPathLength &&
+         Designator.MostDerivedIsArrayElement &&
+         Designator.getMostDerivedArraySize() < 2 &&
+         isDesignatorAtObjectEnd(Ctx, LVal);
+}
+
+/// Converts the given APInt to CharUnits, assuming the APInt is signed.
+/// Fails if the conversion would cause loss of precision.
+static bool convertAPIntToCharUnits(const llvm::APInt &Int, CharUnits &Result) {
+  uint64_t MaxValue = std::numeric_limits<int64_t>::max();
+  if (Int.ugt(MaxValue))
+    return false;
+  Result = CharUnits::fromQuantity(Int.getZExtValue());
+  return true;
 }
 
-/// Tries to evaluate the __builtin_object_size for @p E. If successful, returns
-/// true and stores the result in @p Size.
+/// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
+/// determine how many bytes exist from the beginning of the object to either
+/// the end of the current subobject, or the end of the object itself, depending
+/// on what the LValue looks like + the value of Type.
 ///
-/// If @p WasError is non-null, this will report whether the failure to evaluate
-/// is to be treated as an Error in IntExprEvaluator.
-static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
-                                         EvalInfo &Info, uint64_t &Size,
-                                         bool *WasError = nullptr) {
-  if (WasError != nullptr)
-    *WasError = false;
-
-  auto Error = [&](const Expr *E) {
-    if (WasError != nullptr)
-      *WasError = true;
+/// If this returns false, the value of Result is undefined.
+static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
+                               unsigned Type, const LValue &LVal,
+                               CharUnits &Result) {
+  // In the case where we're not dealing with a subobject in the expression, we
+  // can ignore the subobject bit.
+  //
+  // Additionally, as a special case, we can deal with LValues with a single
+  // array index in their designator as a complete object. (This simplifies
+  // subobject handling logic)
+  bool EvaluateAsCompleteObject = !(Type & 1) || refersToCompleteObject(LVal) ||
+                                  LVal.Designator.isMostDerivedAnUnsizedArray();
+
+  // We can't give a correct lower bound for Type=3 if the designator is invalid
+  // and we're meant to be evaluating it. Give up.
+  if (Type == 3 && LVal.Designator.Invalid && !EvaluateAsCompleteObject)
+    return false;
+
+  // We want to evaluate the size of the entire object. This is a valid fallback
+  // for when Type=1 and the designator is invalid, because we're asked for an
+  // upper-bound.
+ if (LVal.Designator.Invalid || EvaluateAsCompleteObject) { + llvm::APInt APResult; + if (isBaseAnAllocSizeCall(LVal.getLValueBase()) && + getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APResult)) + return convertAPIntToCharUnits(APResult, Result); + + if (LVal.InvalidBase) + return false; + + QualType BaseTy = getObjectType(LVal.getLValueBase()); + if (BaseTy.isNull()) + return false; + + return HandleSizeof(Info, ExprLoc, BaseTy, Result); + } + + // We want to evaluate the size of a subobject. + const SubobjectDesignator &Designator = LVal.Designator; + + // The following is a moderately common idiom in C: + // + // struct Foo { int a; char c[1]; }; + // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar)); + // strcpy(&F->c[0], Bar); + if (isUserWritingOffTheEnd(Info.Ctx, LVal)) { + // If we can resolve this to an alloc_size call, we can hand that back, + // because we know for certain how many bytes there are to write to. + llvm::APInt APResult; + if (isBaseAnAllocSizeCall(LVal.getLValueBase()) && + getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APResult)) + return convertAPIntToCharUnits(APResult, Result); + + // If we cannot determine the size of the initial allocation, then we need + // to give up for Type=1, because we can't accurately determine an + // upper-bound. However, we are still able to give conservative lower-bounds + // for Type=3. + if (Type == 1) + return false; + } + + CharUnits BytesPerElem; + if (!HandleSizeof(Info, ExprLoc, Designator.MostDerivedType, BytesPerElem)) return false; - }; - auto Success = [&](uint64_t S, const Expr *E) { - Size = S; + // According to the GCC documentation, we want the size of the subobject + // denoted by the pointer. But that's not quite right -- what we actually + // want is the size of the immediately-enclosing array, if there is one. + int64_t ElemsRemaining = 1; + if (Designator.MostDerivedIsArrayElement && + Designator.Entries.size() == Designator.MostDerivedPathLength) { + ElemsRemaining = Designator.getMostDerivedArraySize() - + Designator.Entries.back().ArrayIndex; + } else if (Designator.isOnePastTheEnd()) + ElemsRemaining = 0; + + if (ElemsRemaining <= 0) { + // We've gone passed the end -- pretend there were zero bytes. + Result = CharUnits::Zero(); return true; - }; + } + + Result = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining; + return true; +} +/// \brief Tries to evaluate the __builtin_object_size for @p E. If successful, +/// returns true and stores the result in @p Size. +/// +/// If @p WasError is non-null, this will report whether the failure to evaluate +/// is to be treated as an Error in IntExprEvaluator. +static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type, + EvalInfo &Info, uint64_t &Size) { // Determine the denoted object. - LValue Base; + LValue LVal; { // The operand of __builtin_object_size is never evaluated for side-effects. // If there are any, but we can determine the pointed-to object anyway, then // ignore the side-effects. 
SpeculativeEvaluationRAII SpeculativeEval(Info); - FoldOffsetRAII Fold(Info, Type & 1); + FoldOffsetRAII Fold(Info); if (E->isGLValue()) { // It's possible for us to be given GLValues if we're called via @@ -6590,115 +6927,25 @@ APValue RVal; if (!EvaluateAsRValue(Info, E, RVal)) return false; - Base.setFrom(Info.Ctx, RVal); - } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), Base, Info)) + LVal.setFrom(Info.Ctx, RVal); + } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info)) return false; } - CharUnits BaseOffset = Base.getLValueOffset(); // If we point to before the start of the object, there are no accessible // bytes. - if (BaseOffset.isNegative()) - return Success(0, E); - - // In the case where we're not dealing with a subobject, we discard the - // subobject bit. - bool SubobjectOnly = (Type & 1) != 0 && !refersToCompleteObject(Base); - - // If Type & 1 is 0, we need to be able to statically guarantee that the bytes - // exist. If we can't verify the base, then we can't do that. - // - // As a special case, we produce a valid object size for an unknown object - // with a known designator if Type & 1 is 1. For instance: - // - // extern struct X { char buff[32]; int a, b, c; } *p; - // int a = __builtin_object_size(p->buff + 4, 3); // returns 28 - // int b = __builtin_object_size(p->buff + 4, 2); // returns 0, not 40 - // - // This matches GCC's behavior. - if (Base.InvalidBase && !SubobjectOnly) - return Error(E); - - // If we're not examining only the subobject, then we reset to a complete - // object designator - // - // If Type is 1 and we've lost track of the subobject, just find the complete - // object instead. (If Type is 3, that's not correct behavior and we should - // return 0 instead.) - LValue End = Base; - if (!SubobjectOnly || (End.Designator.Invalid && Type == 1)) { - QualType T = getObjectType(End.getLValueBase()); - if (T.isNull()) - End.Designator.setInvalid(); - else { - End.Designator = SubobjectDesignator(T); - End.Offset = CharUnits::Zero(); - } + if (LVal.getLValueOffset().isNegative()) { + Size = 0; + return true; } - // If it is not possible to determine which objects ptr points to at compile - // time, __builtin_object_size should return (size_t) -1 for type 0 or 1 - // and (size_t) 0 for type 2 or 3. - if (End.Designator.Invalid) - return false; - - // According to the GCC documentation, we want the size of the subobject - // denoted by the pointer. But that's not quite right -- what we actually - // want is the size of the immediately-enclosing array, if there is one. - int64_t AmountToAdd = 1; - if (End.Designator.MostDerivedIsArrayElement && - End.Designator.Entries.size() == End.Designator.MostDerivedPathLength) { - // We got a pointer to an array. Step to its end. - AmountToAdd = End.Designator.MostDerivedArraySize - - End.Designator.Entries.back().ArrayIndex; - } else if (End.Designator.isOnePastTheEnd()) { - // We're already pointing at the end of the object. 
- AmountToAdd = 0; - } - - QualType PointeeType = End.Designator.MostDerivedType; - assert(!PointeeType.isNull()); - if (PointeeType->isIncompleteType() || PointeeType->isFunctionType()) - return Error(E); - - if (!HandleLValueArrayAdjustment(Info, E, End, End.Designator.MostDerivedType, - AmountToAdd)) - return false; - - auto EndOffset = End.getLValueOffset(); - - // The following is a moderately common idiom in C: - // - // struct Foo { int a; char c[1]; }; - // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar)); - // strcpy(&F->c[0], Bar); - // - // So, if we see that we're examining a 1-length (or 0-length) array at the - // end of a struct with an unknown base, we give up instead of breaking code - // that behaves this way. Note that we only do this when Type=1, because - // Type=3 is a lower bound, so answering conservatively is fine. - if (End.InvalidBase && SubobjectOnly && Type == 1 && - End.Designator.Entries.size() == End.Designator.MostDerivedPathLength && - End.Designator.MostDerivedIsArrayElement && - End.Designator.MostDerivedArraySize < 2 && - isDesignatorAtObjectEnd(Info.Ctx, End)) + CharUnits EndOffset; + if (!determineEndOffset(Info, E->getExprLoc(), Type, LVal, EndOffset)) return false; - if (BaseOffset > EndOffset) - return Success(0, E); - - return Success((EndOffset - BaseOffset).getQuantity(), E); -} - -bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E, - unsigned Type) { - uint64_t Size; - bool WasError; - if (::tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size, &WasError)) - return Success(Size, E); - if (WasError) - return Error(E); - return false; + CharUnits BytesRemaining = EndOffset - LVal.getLValueOffset(); + Size = BytesRemaining.isNegative() ? 0 : BytesRemaining.getQuantity(); + return true; } bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) { @@ -6712,8 +6959,9 @@ E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue(); assert(Type <= 3 && "unexpected type"); - if (TryEvaluateBuiltinObjectSize(E, Type)) - return true; + uint64_t Size; + if (tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size)) + return Success(Size, E); if (E->getArg(0)->HasSideEffects(Info.Ctx)) return Success((Type & 2) ? 0 : -1, E); @@ -6726,7 +6974,7 @@ case EvalInfo::EM_ConstantFold: case EvalInfo::EM_EvaluateForOverflow: case EvalInfo::EM_IgnoreSideEffects: - case EvalInfo::EM_DesignatorFold: + case EvalInfo::EM_OffsetFold: // Leave it to IR generation. return Error(E); case EvalInfo::EM_ConstantExpressionUnevaluated: @@ -9735,5 +9983,5 @@ Expr::EvalStatus Status; EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold); - return ::tryEvaluateBuiltinObjectSize(this, Type, Info, Result); + return tryEvaluateBuiltinObjectSize(this, Type, Info, Result); } Index: lib/CodeGen/CGCall.cpp =================================================================== --- lib/CodeGen/CGCall.cpp +++ lib/CodeGen/CGCall.cpp @@ -1655,6 +1655,14 @@ HasAnyX86InterruptAttr = TargetDecl->hasAttr(); HasOptnone = TargetDecl->hasAttr(); + if (auto *AllocSize = TargetDecl->getAttr()) { + Optional NumElemsParam; + // alloc_size args are base-1, 0 means not present. + if (unsigned N = AllocSize->getNumElemsParam()) + NumElemsParam = N - 1; + FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1, + NumElemsParam); + } } // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 
Index: lib/Sema/SemaDeclAttr.cpp
===================================================================
--- lib/Sema/SemaDeclAttr.cpp
+++ lib/Sema/SemaDeclAttr.cpp
@@ -244,6 +244,28 @@
   return true;
 }
 
+/// \brief Wrapper around checkUInt32Argument, with an extra check to be sure
+/// that the result will fit into a regular (signed) int. All args have the same
+/// purpose as they do in checkUInt32Argument.
+static bool checkPositiveIntArgument(Sema &S, const AttributeList &Attr,
+                                     const Expr *Expr, int &Val,
+                                     unsigned Idx = UINT_MAX) {
+  uint32_t UVal;
+  if (!checkUInt32Argument(S, Attr, Expr, UVal, Idx))
+    return false;
+
+  if (UVal > std::numeric_limits<int>::max()) {
+    llvm::APSInt I(32); // for toString
+    I = UVal;
+    S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
+        << I.toString(10, false) << 32 << /* Unsigned */ 0;
+    return false;
+  }
+
+  Val = UVal;
+  return true;
+}
+
 /// \brief Diagnose mutually exclusive attributes when present on a given
 /// declaration. Returns true if diagnosed.
 template <typename AttrTy>
@@ -728,6 +750,69 @@
         Attr.getAttributeSpellingListIndex()));
 }
 
+/// \brief Checks to be sure that the given parameter number is in bounds, and
+/// refers to a parameter of some integral type. Will emit appropriate
+/// diagnostics if this returns false.
+///
+/// FuncParamNo is expected to be from the user, so is base-1. AttrArgNo is used
+/// to actually retrieve the argument, so it's base-0.
+static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
+                                    const AttributeList &Attr,
+                                    unsigned FuncParamNo, unsigned AttrArgNo) {
+  assert(Attr.isArgExpr(AttrArgNo) && "Expected expression argument");
+  uint64_t Idx;
+  if (!checkFunctionOrMethodParameterIndex(S, FD, Attr, FuncParamNo,
+                                           Attr.getArgAsExpr(AttrArgNo), Idx))
+    return false;
+
+  const ParmVarDecl *Param = FD->getParamDecl(Idx);
+  if (!Param->getType()->isIntegerType() && !Param->getType()->isCharType()) {
+    SourceLocation SrcLoc = Attr.getArgAsExpr(AttrArgNo)->getLocStart();
+    S.Diag(SrcLoc, diag::err_attribute_integers_only)
+        << Attr.getName() << Param->getSourceRange();
+    return false;
+  }
+  return true;
+}
+
+static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  if (!checkAttributeAtLeastNumArgs(S, Attr, 1) ||
+      !checkAttributeAtMostNumArgs(S, Attr, 2))
+    return;
+
+  const auto *FD = cast<FunctionDecl>(D);
+  if (!FD->getReturnType()->isPointerType()) {
+    S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
+        << Attr.getName();
+    return;
+  }
+
+  const Expr *SizeExpr = Attr.getArgAsExpr(0);
+  int SizeArgNo;
+  // Parameter indices are 1-indexed, hence Index=1
+  if (!checkPositiveIntArgument(S, Attr, SizeExpr, SizeArgNo, /*Index=*/1))
+    return;
+
+  if (!checkParamIsIntegerType(S, FD, Attr, SizeArgNo, /*AttrArgNo=*/0))
+    return;
+
+  // Args are 1-indexed, so 0 implies that the arg was not present
+  int NumberArgNo = 0;
+  if (Attr.getNumArgs() == 2) {
+    const Expr *NumberExpr = Attr.getArgAsExpr(1);
+    // Parameter indices are 1-based, hence Index=2
+    if (!checkPositiveIntArgument(S, Attr, NumberExpr, NumberArgNo,
+                                  /*Index=*/2))
+      return;
+
+    if (!checkParamIsIntegerType(S, FD, Attr, NumberArgNo, /*AttrArgNo=*/1))
+      return;
+  }
+
+  D->addAttr(::new (S.Context) AllocSizeAttr(
+      Attr.getRange(), S.Context, SizeArgNo, NumberArgNo,
+      Attr.getAttributeSpellingListIndex()));
+}
 
 static bool checkTryLockFunAttrCommon(Sema &S, Decl *D,
                                       const AttributeList &Attr,
@@ -5407,6 +5492,9 @@
   case AttributeList::AT_AlignValue:
     handleAlignValueAttr(S, D, Attr);
     break;
+  case AttributeList::AT_AllocSize:
+    handleAllocSizeAttr(S, D, Attr);
+
break; case AttributeList::AT_AlwaysInline: handleAlwaysInlineAttr(S, D, Attr); break; Index: test/CodeGen/alloc-size.c =================================================================== --- /dev/null +++ test/CodeGen/alloc-size.c @@ -0,0 +1,370 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm %s -o - 2>&1 | FileCheck %s + +#define NULL ((void *)0) + +int gi; + +typedef unsigned long size_t; + +// CHECK-DAG-RE: define void @my_malloc({{.*}}) #[[MALLOC_ATTR_NUMBER:[0-9]+]] +// N.B. LLVM's allocsize arguments are base-0, whereas ours are base-1 (for +// compat with GCC) +// CHECK-DAG-RE: attributes #[[MALLOC_ATTR_NUMBER]] = {.*allocsize(0).*} +void *my_malloc(size_t) __attribute__((alloc_size(1))); + +// CHECK-DAG-RE: define void @my_calloc({{.*}}) #[[CALLOC_ATTR_NUMBER:[0-9]+]] +// CHECK-DAG-RE: attributes #[[CALLOC_ATTR_NUMBER]] = {.*allocsize(0, 1).*} +void *my_calloc(size_t, size_t) __attribute__((alloc_size(1, 2))); + +// CHECK-LABEL: @test1 +void test1() { + void *const vp = my_malloc(100); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 0); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 1); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 2); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 3); + + void *const arr = my_calloc(100, 5); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 0); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 1); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 2); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 3); + + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 0); + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 1); + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 2); + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 3); + + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 0); + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 1); + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 2); + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 3); + + void *const zeroPtr = my_malloc(0); + // CHECK: store i32 0 + gi = __builtin_object_size(zeroPtr, 0); + // CHECK: store i32 0 + gi = __builtin_object_size(my_malloc(0), 0); + + void *const zeroArr1 = my_calloc(0, 1); + void *const zeroArr2 = my_calloc(1, 0); + // CHECK: store i32 0 + gi = __builtin_object_size(zeroArr1, 0); + // CHECK: store i32 0 + gi = __builtin_object_size(zeroArr2, 0); + // CHECK: store i32 0 + gi = __builtin_object_size(my_calloc(1, 0), 0); + // CHECK: store i32 0 + gi = __builtin_object_size(my_calloc(0, 1), 0); +} + +// CHECK-LABEL: @test2 +void test2() { + void *const vp = my_malloc(gi); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(vp, 0); + + void *const arr1 = my_calloc(gi, 1); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr1, 0); + + void *const arr2 = my_calloc(1, gi); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr2, 0); +} + +// CHECK-LABEL: @test3 +void test3() { + char *const buf = (char *)my_calloc(100, 5); + // CHECK: store i32 500 + gi = __builtin_object_size(buf, 0); + // CHECK: store i32 500 + gi = __builtin_object_size(buf, 1); + // CHECK: store i32 500 + gi = __builtin_object_size(buf, 2); + // CHECK: store i32 500 + gi = __builtin_object_size(buf, 3); +} + +struct Data { + int a; + int t[10]; + char pad[3]; + char end[1]; +}; + +// CHECK-LABEL: @test5 +void 
test5() { + struct Data *const data = my_malloc(sizeof(*data)); + // CHECK: store i32 48 + gi = __builtin_object_size(data, 0); + // CHECK: store i32 48 + gi = __builtin_object_size(data, 1); + // CHECK: store i32 48 + gi = __builtin_object_size(data, 2); + // CHECK: store i32 48 + gi = __builtin_object_size(data, 3); + + // CHECK: store i32 40 + gi = __builtin_object_size(&data->t[1], 0); + // CHECK: store i32 36 + gi = __builtin_object_size(&data->t[1], 1); + // CHECK: store i32 40 + gi = __builtin_object_size(&data->t[1], 2); + // CHECK: store i32 36 + gi = __builtin_object_size(&data->t[1], 3); + + struct Data *const arr = my_calloc(sizeof(*data), 2); + // CHECK: store i32 96 + gi = __builtin_object_size(arr, 0); + // CHECK: store i32 96 + gi = __builtin_object_size(arr, 1); + // CHECK: store i32 96 + gi = __builtin_object_size(arr, 2); + // CHECK: store i32 96 + gi = __builtin_object_size(arr, 3); + + // CHECK: store i32 88 + gi = __builtin_object_size(&arr->t[1], 0); + // CHECK: store i32 36 + gi = __builtin_object_size(&arr->t[1], 1); + // CHECK: store i32 88 + gi = __builtin_object_size(&arr->t[1], 2); + // CHECK: store i32 36 + gi = __builtin_object_size(&arr->t[1], 3); +} + +// CHECK-LABEL: @test6 +void test6() { + // Things that would normally trigger conservative estimates don't need to do + // so when we know the source of the allocation. + struct Data *const data = my_malloc(sizeof(*data) + 10); + // CHECK: store i32 11 + gi = __builtin_object_size(data->end, 0); + // CHECK: store i32 11 + gi = __builtin_object_size(data->end, 1); + // CHECK: store i32 11 + gi = __builtin_object_size(data->end, 2); + // CHECK: store i32 11 + gi = __builtin_object_size(data->end, 3); + + struct Data *const arr = my_calloc(sizeof(*arr) + 5, 3); + // AFAICT, GCC treats malloc and calloc identically. So, we should do the + // same. + // + // Additionally, GCC ignores the initial array index when determining whether + // we're writing off the end of an alloc_size base. e.g. + // arr[0].end + // arr[1].end + // arr[2].end + // ...Are all considered "writing off the end", because there's no way to tell + // with high accuracy if the user meant "allocate a single N-byte `Data`", + // or "allocate M smaller `Data`s with extra padding". 
+ + // CHECK: store i32 112 + gi = __builtin_object_size(arr->end, 0); + // CHECK: store i32 112 + gi = __builtin_object_size(arr->end, 1); + // CHECK: store i32 112 + gi = __builtin_object_size(arr->end, 2); + // CHECK: store i32 112 + gi = __builtin_object_size(arr->end, 3); + + // CHECK: store i32 112 + gi = __builtin_object_size(arr[0].end, 0); + // CHECK: store i32 112 + gi = __builtin_object_size(arr[0].end, 1); + // CHECK: store i32 112 + gi = __builtin_object_size(arr[0].end, 2); + // CHECK: store i32 112 + gi = __builtin_object_size(arr[0].end, 3); + + // CHECK: store i32 64 + gi = __builtin_object_size(arr[1].end, 0); + // CHECK: store i32 64 + gi = __builtin_object_size(arr[1].end, 1); + // CHECK: store i32 64 + gi = __builtin_object_size(arr[1].end, 2); + // CHECK: store i32 64 + gi = __builtin_object_size(arr[1].end, 3); + + // CHECK: store i32 16 + gi = __builtin_object_size(arr[2].end, 0); + // CHECK: store i32 16 + gi = __builtin_object_size(arr[2].end, 1); + // CHECK: store i32 16 + gi = __builtin_object_size(arr[2].end, 2); + // CHECK: store i32 16 + gi = __builtin_object_size(arr[2].end, 3); +} + +// CHECK-LABEL: @test7 +void test7() { + struct Data *const data = my_malloc(sizeof(*data) + 5); + // CHECK: store i32 9 + gi = __builtin_object_size(data->pad, 0); + // CHECK: store i32 3 + gi = __builtin_object_size(data->pad, 1); + // CHECK: store i32 9 + gi = __builtin_object_size(data->pad, 2); + // CHECK: store i32 3 + gi = __builtin_object_size(data->pad, 3); +} + +// CHECK-LABEL: @test8 +void test8() { + // Non-const pointers aren't currently supported. + void *buf = my_calloc(100, 5); + // CHECK: @llvm.objectsize.i64.p0i8(i8* %{{.*}}, i1 false) + gi = __builtin_object_size(buf, 0); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(buf, 1); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(buf, 2); + // CHECK: store i32 0 + gi = __builtin_object_size(buf, 3); +} + +// CHECK-LABEL: @test9 +void test9() { + // Check to be sure that we unwrap things correctly. + short *const buf0 = (my_malloc(100)); + short *const buf1 = (short*)(my_malloc(100)); + short *const buf2 = ((short*)(my_malloc(100))); + + // CHECK: store i32 100 + gi = __builtin_object_size(buf0, 0); + // CHECK: store i32 100 + gi = __builtin_object_size(buf1, 0); + // CHECK: store i32 100 + gi = __builtin_object_size(buf2, 0); +} + +// CHECK-LABEL: @test10 +void test10() { + // Yay overflow + short *const arr = my_calloc((size_t)-1 / 2 + 1, 2); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr, 0); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr, 1); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr, 2); + // CHECK: store i32 0 + gi = __builtin_object_size(arr, 3); + + // As an implementation detail, CharUnits can't handle numbers greater than or + // equal to 2**63. Realistically, this shouldn't be a problem, but we should + // be sure we don't emit crazy results for this case. 
+ short *const buf = my_malloc((size_t)-1); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(buf, 0); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(buf, 1); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(buf, 2); + // CHECK: store i32 0 + gi = __builtin_object_size(buf, 3); + + short *const arr_big = my_calloc((size_t)-1 / 2 - 1, 2); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr_big, 0); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr_big, 1); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(arr_big, 2); + // CHECK: store i32 0 + gi = __builtin_object_size(arr_big, 3); +} + +void *my_tiny_malloc(char) __attribute__((alloc_size(1))); +void *my_tiny_calloc(char, char) __attribute__((alloc_size(1, 2))); + +// CHECK-LABEL: @test11 +void test11() { + void *const vp = my_tiny_malloc(100); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 0); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 1); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 2); + // CHECK: store i32 100 + gi = __builtin_object_size(vp, 3); + + // N.B. This causes char overflow, but not size_t overflow, so it should be + // supported. + void *const arr = my_tiny_calloc(100, 5); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 0); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 1); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 2); + // CHECK: store i32 500 + gi = __builtin_object_size(arr, 3); + + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 0); + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 1); + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 2); + // CHECK: store i32 100 + gi = __builtin_object_size(my_malloc(100), 3); + + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 0); + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 1); + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 2); + // CHECK: store i32 500 + gi = __builtin_object_size(my_calloc(100, 5), 3); +} + +void *my_signed_malloc(long) __attribute__((alloc_size(1))); +void *my_signed_calloc(long, long) __attribute__((alloc_size(1, 2))); + +// CHECK-LABEL: @test12 +void test12() { + // CHECK: store i32 100 + gi = __builtin_object_size(my_signed_malloc(100), 0); + // CHECK: store i32 500 + gi = __builtin_object_size(my_signed_calloc(100, 5), 0); + + void *const vp = my_signed_malloc(-2); + // CHECK: @llvm.objectsize + gi = __builtin_object_size(vp, 0); + // N.B. These get lowered to -1 because the function calls may have + // side-effects, and we can't determine the objectsize. 
+  // CHECK: store i32 -1
+  gi = __builtin_object_size(my_signed_malloc(-2), 0);
+
+  void *const arr1 = my_signed_calloc(-2, 1);
+  void *const arr2 = my_signed_calloc(1, -2);
+  // CHECK: @llvm.objectsize
+  gi = __builtin_object_size(arr1, 0);
+  // CHECK: @llvm.objectsize
+  gi = __builtin_object_size(arr2, 0);
+  // CHECK: store i32 -1
+  gi = __builtin_object_size(my_signed_calloc(1, -2), 0);
+  // CHECK: store i32 -1
+  gi = __builtin_object_size(my_signed_calloc(-2, 1), 0);
+}
Index: test/CodeGenCXX/alloc-size.cpp
===================================================================
--- /dev/null
+++ test/CodeGenCXX/alloc-size.cpp
@@ -0,0 +1,72 @@
+// RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -O0 %s -o - 2>&1 -std=c++11 | FileCheck %s
+
+namespace templates {
+void *my_malloc(int N) __attribute__((alloc_size(1)));
+void *my_calloc(int N, int M) __attribute__((alloc_size(1, 2)));
+
+struct MyType {
+  int arr[4];
+};
+
+template <typename T> int callMalloc();
+
+template <typename T, int N> int callCalloc();
+
+// CHECK-LABEL: define i32 @_ZN9templates6testItEv()
+int testIt() {
+  // CHECK: call i32 @_ZN9templates10callMallocINS_6MyTypeEEEiv
+  // CHECK: call i32 @_ZN9templates10callCallocINS_6MyTypeELi4EEEiv
+  return callMalloc<MyType>() + callCalloc<MyType, 4>();
+}
+
+// CHECK-LABEL: define linkonce_odr i32 @_ZN9templates10callMallocINS_6MyTypeEEEiv
+template <typename T> int callMalloc() {
+  static_assert(sizeof(T) == 16, "");
+  // CHECK: ret i32 16
+  return __builtin_object_size(my_malloc(sizeof(T)), 0);
+}
+
+// CHECK-LABEL: define linkonce_odr i32 @_ZN9templates10callCallocINS_6MyTypeELi4EEEiv
+template <typename T, int N> int callCalloc() {
+  static_assert(sizeof(T) * N == 64, "");
+  // CHECK: ret i32 64
+  return __builtin_object_size(my_malloc(sizeof(T) * N), 0);
+}
+}
+
+namespace templated_alloc_size {
+using size_t = unsigned long;
+
+// We don't need bodies for any of these, because they're only used in
+// __builtin_object_size, and that shouldn't need anything but a function
+// decl with alloc_size on it.
+template <typename T>
+T *my_malloc(size_t N = sizeof(T)) __attribute__((alloc_size(1)));
+
+template <typename T>
+T *my_calloc(size_t M, size_t N = sizeof(T)) __attribute__((alloc_size(2, 1)));
+
+template <size_t N>
+void *dependent_malloc(size_t NT = N) __attribute__((alloc_size(1)));
+
+template <size_t N, size_t M>
+void *dependent_calloc(size_t NT = N, size_t MT = M)
+    __attribute__((alloc_size(1, 2)));
+
+template <typename T, size_t M>
+void *dependent_calloc2(size_t NT = sizeof(T), size_t MT = M)
+    __attribute__((alloc_size(1, 2)));
+
+// CHECK-LABEL: define i32 @_ZN20templated_alloc_size6testItEv
+int testIt() {
+  // 122 = 4 + 5*4 + 6 + 7*8 + 4*9
+  // CHECK: ret i32 122
+  return __builtin_object_size(my_malloc<int>(), 0) +
+         __builtin_object_size(my_calloc<int>(5), 0) +
+         __builtin_object_size(dependent_malloc<6>(), 0) +
+         __builtin_object_size(dependent_calloc<7, 8>(), 0) +
+         __builtin_object_size(dependent_calloc2<int, 9>(), 0);
+}
+}
Index: test/CodeGenCXX/global-init.cpp
===================================================================
--- test/CodeGenCXX/global-init.cpp
+++ test/CodeGenCXX/global-init.cpp
@@ -18,9 +18,6 @@
 // CHECK: @__dso_handle = external global i8
 // CHECK: @c = global %struct.C zeroinitializer, align 8
 
-// It's okay if we ever implement the IR-generation optimization to remove this.
-// CHECK: @_ZN5test3L3varE = internal constant i8* getelementptr inbounds ([7 x i8], [7 x i8]* - // PR6205: The casts should not require global initializers // CHECK: @_ZN6PR59741cE = external global %"struct.PR5974::C" // CHECK: @_ZN6PR59741aE = global %"struct.PR5974::A"* getelementptr inbounds (%"struct.PR5974::C", %"struct.PR5974::C"* @_ZN6PR59741cE, i32 0, i32 0) Index: test/Sema/alloc-size.c =================================================================== --- /dev/null +++ test/Sema/alloc-size.c @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 %s -verify + +void *fail1(int a) __attribute__((alloc_size)); //expected-error{{'alloc_size' attribute takes at least 1 argument}} +void *fail2(int a) __attribute__((alloc_size())); //expected-error{{'alloc_size' attribute takes at least 1 argument}} + +void *fail3(int a) __attribute__((alloc_size(0))); //expected-error{{'alloc_size' attribute parameter 0 is out of bounds}} +void *fail4(int a) __attribute__((alloc_size(2))); //expected-error{{'alloc_size' attribute parameter 2 is out of bounds}} + +void *fail5(int a, int b) __attribute__((alloc_size(0, 1))); //expected-error{{'alloc_size' attribute parameter 0 is out of bounds}} +void *fail6(int a, int b) __attribute__((alloc_size(3, 1))); //expected-error{{'alloc_size' attribute parameter 3 is out of bounds}} + +void *fail7(int a, int b) __attribute__((alloc_size(1, 0))); //expected-error{{'alloc_size' attribute parameter 0 is out of bounds}} +void *fail8(int a, int b) __attribute__((alloc_size(1, 3))); //expected-error{{'alloc_size' attribute parameter 3 is out of bounds}} + +int fail9(int a) __attribute__((alloc_size(1))); //expected-warning{{'alloc_size' attribute only applies to return values that are pointers}} + +int fail10 __attribute__((alloc_size(1))); //expected-warning{{'alloc_size' attribute only applies to non-K&R-style functions}} + +void *fail11(void *a) __attribute__((alloc_size(1))); //expected-error{{'alloc_size' attribute argument may only refer to a function parameter of integer type}} + +void *fail12(int a) __attribute__((alloc_size("abc"))); //expected-error{{'alloc_size' attribute requires parameter 1 to be an integer constant}} +void *fail12(int a) __attribute__((alloc_size(1, "abc"))); //expected-error{{'alloc_size' attribute requires parameter 2 to be an integer constant}} +void *fail13(int a) __attribute__((alloc_size(1U<<31))); //expected-error{{integer constant expression evaluates to value 2147483648 that cannot be represented in a 32-bit signed integer type}} Index: test/SemaCXX/constant-expression-cxx11.cpp =================================================================== --- test/SemaCXX/constant-expression-cxx11.cpp +++ test/SemaCXX/constant-expression-cxx11.cpp @@ -1156,7 +1156,7 @@ constexpr int m2b = const_cast(n2); // expected-error {{constant expression}} expected-note {{read of volatile object 'n2'}} struct T { int n; }; -const T t = { 42 }; // expected-note {{declared here}} +const T t = { 42 }; constexpr int f(volatile int &&r) { return r; // expected-note {{read of volatile-qualified type 'volatile int'}} @@ -1168,7 +1168,7 @@ int j : f(0); // expected-error {{constant expression}} expected-note {{in call to 'f(0)'}} int k : g(0); // expected-error {{constant expression}} expected-note {{temporary created here}} expected-note {{in call to 'g(0)'}} int l : n3; // expected-error {{constant expression}} expected-note {{read of non-const variable}} - int m : t.n; // expected-error {{constant expression}} expected-note {{read of non-constexpr variable}} + int m 
: t.n; // expected-warning{{width of bit-field 'm' (42 bits)}} }; }
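
A usage sketch, not part of this patch: ``my_malloc`` and ``demo`` below are illustrative names, and the fortify-style check is only one way the new folding can be used. Because ``buf`` is a ``const`` local initialized directly from a call to an ``alloc_size`` function, ``__builtin_object_size`` can fold to the allocation size instead of -1.

.. code-block:: c

  #include <stdlib.h>
  #include <string.h>

  // Assumed wrapper around malloc; alloc_size(1) says the returned pointer
  // refers to n bytes.
  void *my_malloc(unsigned long n) __attribute__((alloc_size(1)));
  void *my_malloc(unsigned long n) { return malloc(n); }

  unsigned long demo(const char *src, unsigned long len) {
    char *const buf = my_malloc(16); // const local, so clang can trace it
    // With this patch, this folds to 16 at compile time rather than -1.
    unsigned long size = __builtin_object_size(buf, 0);
    if (len > size) // fortify-style bounds check
      __builtin_trap();
    memcpy(buf, src, len);
    free(buf);
    return size;
  }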