Index: llvm/trunk/include/llvm/Transforms/IPO/Attributor.h
===================================================================
--- llvm/trunk/include/llvm/Transforms/IPO/Attributor.h
+++ llvm/trunk/include/llvm/Transforms/IPO/Attributor.h
@@ -589,32 +589,30 @@
   /// the one reasoning about the "captured" state for the argument or the one
   /// reasoning on the memory access behavior of the function as a whole.
   template <typename AAType>
-  const AAType *getAAFor(const AbstractAttribute &QueryingAA,
+  const AAType &getAAFor(const AbstractAttribute &QueryingAA,
                          const IRPosition &IRP) {
     static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
                   "Cannot query an attribute with a type not derived from "
                   "'AbstractAttribute'!");

-    // Let's try an equivalent position if available, see
-    // SubsumingPositionIterator for more information.
-    for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
-      // Lookup the abstract attribute of type AAType. If found, return it after
-      // registering a dependence of QueryingAA on the one returned attribute.
-      const auto &KindToAbstractAttributeMap =
-          AAMap.lookup(const_cast<IRPosition &>(EquivIRP));
-      if (AAType *AA = static_cast<AAType *>(
-              KindToAbstractAttributeMap.lookup(&AAType::ID))) {
-        // Do not return an attribute with an invalid state. This minimizes
-        // checks at the calls sites and allows the fallback below to kick in.
-        if (AA->getState().isValidState()) {
-          QueryMap[AA].insert(const_cast<AbstractAttribute *>(&QueryingAA));
-          return AA;
-        }
-      }
+    // Lookup the abstract attribute of type AAType. If found, return it after
+    // registering a dependence of QueryingAA on the one returned attribute.
+    const auto &KindToAbstractAttributeMap =
+        AAMap.lookup(const_cast<IRPosition &>(IRP));
+    if (AAType *AA = static_cast<AAType *>(
+            KindToAbstractAttributeMap.lookup(&AAType::ID))) {
+      // Do not register a dependence on an attribute with an invalid state.
+      if (AA->getState().isValidState())
+        QueryMap[AA].insert(const_cast<AbstractAttribute *>(&QueryingAA));
+      return *AA;
     }

-    // No matching attribute found
-    return nullptr;
+    // No matching attribute found, create one.
+    auto &AA = AAType::createForPosition(IRP, *this);
+    registerAA(AA);
+    if (AA.getState().isValidState())
+      QueryMap[&AA].insert(const_cast<AbstractAttribute *>(&QueryingAA));
+    return AA;
   }

   /// Introduce a new abstract attribute into the fixpoint analysis.
@@ -1145,6 +1143,10 @@
   virtual size_t getNumReturnValues() const = 0;
   virtual const SmallPtrSetImpl<CallBase *> &getUnresolvedCalls() const = 0;

+  /// Create an abstract attribute view for the position \p IRP.
+  static AAReturnedValues &createForPosition(const IRPosition &IRP,
+                                             Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1160,6 +1162,9 @@
   /// Returns true if nounwind is known.
   bool isKnownNoUnwind() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoUnwind &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1175,6 +1180,9 @@
   /// Returns true if "nosync" is known.
   bool isKnownNoSync() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoSync &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1191,6 +1199,9 @@
   /// Return true if we know that underlying value is nonnull.
   bool isKnownNonNull() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AANonNull &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1207,6 +1218,9 @@
   /// Return true if "norecurse" is known.
   bool isKnownNoRecurse() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoRecurse &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1223,6 +1237,9 @@
   /// Return true if "willreturn" is known.
   bool isKnownWillReturn() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AAWillReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1239,6 +1256,9 @@
   /// Return true if we know that underlying value is noalias.
   bool isKnownNoAlias() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoAlias &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1255,6 +1275,9 @@
   /// Return true if "nofree" is known.
   bool isKnownNoFree() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoFree &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1271,6 +1294,9 @@
   /// Return true if the underlying object is known to never return.
   bool isKnownNoReturn() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AANoReturn &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1296,7 +1322,7 @@
   /// of instructions is live.
   template <typename T> bool isLiveInstSet(T begin, T end) const {
     for (const auto &I : llvm::make_range(begin, end)) {
-      assert(I->getFunction() == getIRPosition().getAnchorScope() &&
+      assert(I->getFunction() == getIRPosition().getAssociatedFunction() &&
             "Instruction must be in the same anchor scope function.");

       if (!isAssumedDead(I))
@@ -1313,6 +1339,9 @@
   const IRPosition &getIRPosition() const { return *this; }
   ///}

+  /// Create an abstract attribute view for the position \p IRP.
+  static AAIsDead &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1339,6 +1368,10 @@
   /// Return known dereferenceable bytes.
   virtual uint32_t getKnownDereferenceableBytes() const = 0;

+  /// Create an abstract attribute view for the position \p IRP.
+  static AADereferenceable &createForPosition(const IRPosition &IRP,
+                                              Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
@@ -1355,6 +1388,9 @@
   /// Return known alignment.
   unsigned getKnownAlign() const { return getKnown(); }

+  /// Create an abstract attribute view for the position \p IRP.
+  static AAAlign &createForPosition(const IRPosition &IRP, Attributor &A);
+
   /// Unique ID (due to the unique address)
   static const char ID;
 };
Index: llvm/trunk/lib/Transforms/IPO/Attributor.cpp
===================================================================
--- llvm/trunk/lib/Transforms/IPO/Attributor.cpp
+++ llvm/trunk/lib/Transforms/IPO/Attributor.cpp
@@ -146,7 +146,7 @@

   const AAIsDead *LivenessAA = nullptr;
   if (IRP.getAnchorScope())
-    LivenessAA = A.getAAFor<AAIsDead>(
+    LivenessAA = &A.getAAFor<AAIsDead>(
         QueryingAA, IRPosition::function(*IRP.getAnchorScope()));

   // TODO: Use Positions here to allow context sensitivity in VisitValueCB
@@ -196,10 +196,11 @@

     // Look through phi nodes, visit all live operands.
     if (auto *PHI = dyn_cast<PHINode>(V)) {
+      assert(LivenessAA &&
+             "Expected liveness in the presence of instructions!");
       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
         const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
-        if (!LivenessAA ||
-            !LivenessAA->isAssumedDead(IncomingBB->getTerminator()))
+        if (!LivenessAA->isAssumedDead(IncomingBB->getTerminator()))
           Worklist.push_back(PHI->getIncomingValue(u));
       }
       continue;
@@ -275,8 +276,6 @@

 ChangeStatus IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
                                                 const ArrayRef<Attribute> &DeducedAttrs) {
-  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
-
   Function *ScopeFn = IRP.getAssociatedFunction();

   IRPosition::Kind PK = IRP.getPositionKind();
@@ -288,7 +287,7 @@
   switch (PK) {
   case IRPosition::IRP_INVALID:
   case IRPosition::IRP_FLOAT:
-    llvm_unreachable("Cannot manifest at a floating or invalid position!");
+    return ChangeStatus::UNCHANGED;
   case IRPosition::IRP_ARGUMENT:
   case IRPosition::IRP_FUNCTION:
   case IRPosition::IRP_RETURNED:
@@ -301,6 +300,7 @@
     break;
   }

+  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
   for (const Attribute &Attr : DeducedAttrs) {
     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
@@ -502,15 +502,10 @@
   // Callback for each possibly returned value.
   auto CheckReturnValue = [&](Value &RV) -> bool {
     const IRPosition &RVPos = IRPosition::value(RV);
-    const AAType *AA = A.getAAFor<AAType>(QueryingAA, RVPos);
-    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV
-                      << " AA: " << (AA ? AA->getAsStr() : "n/a") << " @ "
-                      << RVPos << "\n");
-    // TODO: We should create abstract attributes on-demand, patches are already
-    // prepared, pending approval.
-    if (!AA || AA->getIRPosition() != RVPos)
-      return false;
-    const StateType &AAS = static_cast<const StateType &>(AA->getState());
+    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
+    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
+                      << " @ " << RVPos << "\n");
+    const StateType &AAS = static_cast<const StateType &>(AA.getState());
     if (T.hasValue())
       *T &= AAS;
     else
@@ -527,9 +522,10 @@
 }

 /// Helper class for generic deduction: return value -> returned position.
-template <typename AAType, typename StateType = typename AAType::StateType>
-struct AAReturnedFromReturnedValues : public AAType {
-  AAReturnedFromReturnedValues(const IRPosition &IRP) : AAType(IRP) {}
+template <typename AAType, typename Base,
+          typename StateType = typename AAType::StateType>
+struct AAReturnedFromReturnedValues : public Base {
+  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

   /// See AbstractAttribute::updateImpl(...).
   ChangeStatus updateImpl(Attributor &A) override {
@@ -563,15 +559,10 @@

   auto CallSiteCheck = [&](CallSite CS) {
     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
-    const AAType *AA = A.getAAFor<AAType>(QueryingAA, CSArgPos);
+    const AAType &AA = A.getAAFor<AAType>(QueryingAA, CSArgPos);
     LLVM_DEBUG(dbgs() << "[Attributor] CS: " << *CS.getInstruction()
-                      << " AA: " << (AA ? AA->getAsStr() : "n/a") << " @"
-                      << CSArgPos << "\n");
-    // TODO: We should create abstract attributes on-demand, patches are already
-    // prepared, pending approval.
-    if (!AA || AA->getIRPosition() != CSArgPos)
-      return false;
-    const StateType &AAS = static_cast<const StateType &>(AA->getState());
+                      << " AA: " << AA.getAsStr() << " @" << CSArgPos << "\n");
+    const StateType &AAS = static_cast<const StateType &>(AA.getState());
     if (T.hasValue())
       *T &= AAS;
     else
@@ -588,9 +579,10 @@
 }

 /// Helper class for generic deduction: call site argument -> argument position.
-template <typename AAType, typename StateType = typename AAType::StateType>
-struct AAArgumentFromCallSiteArguments : public AAType {
-  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : AAType(IRP) {}
+template <typename AAType, typename Base,
+          typename StateType = typename AAType::StateType>
+struct AAArgumentFromCallSiteArguments : public Base {
+  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

   /// See AbstractAttribute::updateImpl(...).
   ChangeStatus updateImpl(Attributor &A) override {
@@ -603,9 +595,9 @@
 };

 /// Helper class for generic replication: function returned -> cs returned.
-template <typename AAType>
-struct AACallSiteReturnedFromReturned : public AAType {
-  AACallSiteReturnedFromReturned(const IRPosition &IRP) : AAType(IRP) {}
+template <typename AAType, typename Base>
+struct AACallSiteReturnedFromReturned : public Base {
+  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

   /// See AbstractAttribute::updateImpl(...).
   ChangeStatus updateImpl(Attributor &A) override {
@@ -621,13 +613,9 @@
       return S.indicatePessimisticFixpoint();

     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
-    // TODO: We should create abstract attributes on-demand, patches are already
-    // prepared, pending approval.
-    const AAType *AA = A.getAAFor<AAType>(*this, FnPos);
-    if (!AA)
-      return S.indicatePessimisticFixpoint();
+    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
     return clampStateAndIndicateChange(
-        S, static_cast<const typename AAType::StateType &>(AA->getState()));
+        S, static_cast<const typename AAType::StateType &>(AA.getState()));
   }
 };

@@ -657,8 +645,12 @@
     if (!I.mayThrow())
       return true;

-    auto *NoUnwindAA = A.getAAFor<AANoUnwind>(*this, IRPosition::value(I));
-    return NoUnwindAA && NoUnwindAA->isAssumedNoUnwind();
+    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
+      const auto &NoUnwindAA =
+          A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
+      return NoUnwindAA.isAssumedNoUnwind();
+    }
+    return false;
   };

   if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
@@ -940,23 +932,21 @@
     if (!CB || UnresolvedCalls.count(CB))
       continue;

-    const auto *RetValAAPtr =
+    const auto &RetValAA =
         A.getAAFor<AAReturnedValues>(*this, IRPosition::callsite_function(*CB));
+    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
+                      << static_cast<const AbstractAttribute &>(RetValAA)
+                      << "\n");

     // Skip dead ends, thus if we do not know anything about the returned
     // call we mark it as unresolved and it will stay that way.
-    if (!RetValAAPtr || !RetValAAPtr->getState().isValidState()) {
+    if (!RetValAA.getState().isValidState()) {
       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                         << "\n");
       UnresolvedCalls.insert(CB);
       continue;
     }

-    const auto &RetValAA = *RetValAAPtr;
-    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
-                      << static_cast<const AbstractAttribute &>(RetValAA)
-                      << "\n");
-
     // Do not try to learn partial information.
If the callee has unresolved // return values we will treat the call as unresolved/opaque. auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls(); @@ -1167,9 +1157,9 @@ if (ICS.hasFnAttr(Attribute::NoSync)) return true; - auto *NoSyncAA = - A.getAAFor(*this, IRPosition::callsite_function(ICS)); - if (NoSyncAA && NoSyncAA->isAssumedNoSync()) + const auto &NoSyncAA = + A.getAAFor(*this, IRPosition::callsite_function(ICS)); + if (NoSyncAA.isAssumedNoSync()) return true; return false; } @@ -1225,9 +1215,9 @@ if (ICS.hasFnAttr(Attribute::NoFree)) return true; - auto *NoFreeAA = - A.getAAFor(*this, IRPosition::callsite_function(ICS)); - return NoFreeAA && NoFreeAA->isAssumedNoFree(); + const auto &NoFreeAA = + A.getAAFor(*this, IRPosition::callsite_function(ICS)); + return NoFreeAA.isAssumedNoFree(); }; if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this)) @@ -1295,19 +1285,17 @@ auto VisitValueCB = [&](Value &V, AAAlign::StateType &T, bool Stripped) -> bool { - if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, + const auto &AA = A.getAAFor(*this, IRPosition::value(V)); + if (!Stripped && this == &AA) { + if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, /* TODO: CtxI */ nullptr, - /* TODO: DT */ nullptr)) { - // Known non-zero, all good. - } else if (const auto *AA = - A.getAAFor(*this, IRPosition::value(V))) { - // Try to use abstract attribute information. - if (!AA->isAssumedNonNull()) + /* TODO: DT */ nullptr)) T.indicatePessimisticFixpoint(); } else { - // IR information was not sufficient and we did not find an abstract - // attribute to use. TODO: on-demand attribute creation! - T.indicatePessimisticFixpoint(); + // Use abstract attribute information. + const AANonNull::StateType &NS = + static_cast(AA.getState()); + T ^= NS; } return T.isValidState(); }; @@ -1325,9 +1313,10 @@ }; /// NonNull attribute for function return value. -struct AANonNullReturned final : AAReturnedFromReturnedValues { +struct AANonNullReturned final + : AAReturnedFromReturnedValues { AANonNullReturned(const IRPosition &IRP) - : AAReturnedFromReturnedValues(IRP) {} + : AAReturnedFromReturnedValues(IRP) {} /// See AbstractAttribute::trackStatistics() void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } @@ -1335,9 +1324,9 @@ /// NonNull attribute for function argument. struct AANonNullArgument final - : AAArgumentFromCallSiteArguments { + : AAArgumentFromCallSiteArguments { AANonNullArgument(const IRPosition &IRP) - : AAArgumentFromCallSiteArguments(IRP) {} + : AAArgumentFromCallSiteArguments(IRP) {} /// See AbstractAttribute::trackStatistics() void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } @@ -1352,9 +1341,9 @@ /// NonNull attribute for a call site return position. struct AANonNullCallSiteReturned final - : AACallSiteReturnedFromReturned { + : AACallSiteReturnedFromReturned { AANonNullCallSiteReturned(const IRPosition &IRP) - : AACallSiteReturnedFromReturned(IRP) {} + : AACallSiteReturnedFromReturned(IRP) {} /// See AbstractAttribute::trackStatistics() void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } @@ -1437,21 +1426,14 @@ /// See AbstractAttribute::updateImpl(...). 
ChangeStatus updateImpl(Attributor &A) override { auto CheckForWillReturn = [&](Instruction &I) { - ImmutableCallSite ICS(&I); - if (ICS.hasFnAttr(Attribute::WillReturn)) + IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I)); + const auto &WillReturnAA = A.getAAFor(*this, IPos); + if (WillReturnAA.isKnownWillReturn()) return true; - - IRPosition IPos = IRPosition::callsite_function(ICS); - auto *WillReturnAA = A.getAAFor(*this, IPos); - if (!WillReturnAA || !WillReturnAA->isAssumedWillReturn()) + if (!WillReturnAA.isAssumedWillReturn()) return false; - - // FIXME: Prohibit any recursion for now. - if (ICS.hasFnAttr(Attribute::NoRecurse)) - return true; - - auto *NoRecurseAA = A.getAAFor(*this, IPos); - return NoRecurseAA && NoRecurseAA->isAssumedNoRecurse(); + const auto &NoRecurseAA = A.getAAFor(*this, IPos); + return NoRecurseAA.isAssumedNoRecurse(); }; if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this)) @@ -1553,12 +1535,10 @@ if (!ICS) return false; - if (!ICS.returnDoesNotAlias()) { - auto *NoAliasAA = - A.getAAFor(*this, IRPosition::callsite_returned(ICS)); - if (!NoAliasAA || !NoAliasAA->isAssumedNoAlias()) - return false; - } + const auto &NoAliasAA = + A.getAAFor(*this, IRPosition::callsite_returned(ICS)); + if (!NoAliasAA.isAssumedNoAlias()) + return false; /// FIXME: We can improve capture check in two ways: /// 1. Use the AANoCapture facilities. @@ -1649,10 +1629,9 @@ /// and only place an unreachable in the normal successor. if (Invoke2CallAllowed) { if (Function *Callee = II->getCalledFunction()) { - auto *AANoUnw = - A.getAAFor(*this, IRPosition::function(*Callee)); - if (Callee->hasFnAttribute(Attribute::NoUnwind) || - (AANoUnw && AANoUnw->isAssumedNoUnwind())) { + const IRPosition &IPos = IRPosition::callsite_function(*II); + const auto &AANoUnw = A.getAAFor(*this, IPos); + if (AANoUnw.isAssumedNoUnwind()) { LLVM_DEBUG(dbgs() << "[AAIsDead] Replace invoke with call inst\n"); // We do not need an invoke (II) but instead want a call followed @@ -1791,17 +1770,15 @@ // instruction but the unwind block might still be. if (auto *Invoke = dyn_cast(I)) { // Use nounwind to justify the unwind block is dead as well. - auto *AANoUnw = A.getAAFor(*this, IPos); - if (!Invoke2CallAllowed || - (!AANoUnw || !AANoUnw->isAssumedNoUnwind())) { + const auto &AANoUnw = A.getAAFor(*this, IPos); + if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) { AssumedLiveBlocks.insert(Invoke->getUnwindDest()); ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front()); } } - auto *NoReturnAA = A.getAAFor(*this, IPos); - if (ICS.hasFnAttr(Attribute::NoReturn) || - (NoReturnAA && NoReturnAA->isAssumedNoReturn())) + const auto &NoReturnAA = A.getAAFor(*this, IPos); + if (NoReturnAA.isAssumedNoReturn()) return I; } @@ -1967,7 +1944,7 @@ for (const Attribute &Attr : Attrs) takeKnownDerefBytesMaximum(Attr.getValueAsInt()); - NonNullAA = A.getAAFor(*this, getIRPosition()); + NonNullAA = &A.getAAFor(*this, getIRPosition()); } /// See AbstractAttribute::getState() @@ -2038,18 +2015,17 @@ const Value *Base = V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset); - const auto *AA = - A.getAAFor(*this, IRPosition::value(*Base)); + const auto &AA = + A.getAAFor(*this, IRPosition::value(*Base)); int64_t DerefBytes = 0; - if (!AA || (!Stripped && - getIRPosition().getPositionKind() == IRPosition::IRP_FLOAT)) { + if (!Stripped && this == &AA) { // Use IR information if we did not strip anything. // TODO: track globally. 
bool CanBeNull; DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull); T.GlobalState.indicatePessimisticFixpoint(); } else { - const DerefState &DS = static_cast(AA->getState()); + const DerefState &DS = static_cast(AA.getState()); DerefBytes = DS.DerefBytesState.getAssumed(); T.GlobalState &= DS.GlobalState; } @@ -2057,8 +2033,7 @@ T.takeAssumedDerefBytesMinimum( std::max(int64_t(0), DerefBytes - Offset.getSExtValue())); - if (!Stripped && - getIRPosition().getPositionKind() == IRPosition::IRP_FLOAT) { + if (!Stripped && this == &AA) { T.takeKnownDerefBytesMaximum( std::max(int64_t(0), DerefBytes - Offset.getSExtValue())); T.indicatePessimisticFixpoint(); @@ -2083,9 +2058,11 @@ /// Dereferenceable attribute for a return value. struct AADereferenceableReturned final - : AAReturnedFromReturnedValues { + : AAReturnedFromReturnedValues { AADereferenceableReturned(const IRPosition &IRP) - : AAReturnedFromReturnedValues(IRP) {} + : AAReturnedFromReturnedValues(IRP) {} /// See AbstractAttribute::trackStatistics() void trackStatistics() const override { @@ -2095,9 +2072,12 @@ /// Dereferenceable attribute for an argument struct AADereferenceableArgument final - : AAArgumentFromCallSiteArguments { + : AAArgumentFromCallSiteArguments { AADereferenceableArgument(const IRPosition &IRP) - : AAArgumentFromCallSiteArguments(IRP) {} + : AAArgumentFromCallSiteArguments( + IRP) {} /// See AbstractAttribute::trackStatistics() void trackStatistics() const override{ @@ -2163,21 +2143,16 @@ auto VisitValueCB = [&](Value &V, AAAlign::StateType &T, bool Stripped) -> bool { - if (!Stripped && - getIRPosition().getPositionKind() == IRPosition::IRP_FLOAT) { + const auto &AA = A.getAAFor(*this, IRPosition::value(V)); + if (!Stripped && this == &AA) { // Use only IR information if we did not strip anything. T.takeKnownMaximum(V.getPointerAlignment(DL)); T.indicatePessimisticFixpoint(); - } else if (const auto *AA = - A.getAAFor(*this, IRPosition::value(V))) { - // Try to use abstract attribute information. - const AAAlign::StateType &DS = - static_cast(AA->getState()); - T.takeAssumedMinimum(DS.getAssumed()); } else { - // Last resort, look into the IR. - T.takeKnownMaximum(V.getPointerAlignment(DL)); - T.indicatePessimisticFixpoint(); + // Use abstract attribute information. + const AAAlign::StateType &DS = + static_cast(AA.getState()); + T ^= DS; } return T.isValidState(); }; @@ -2197,18 +2172,20 @@ }; /// Align attribute for function return value. -struct AAAlignReturned final : AAReturnedFromReturnedValues { +struct AAAlignReturned final + : AAReturnedFromReturnedValues { AAAlignReturned(const IRPosition &IRP) - : AAReturnedFromReturnedValues(IRP) {} + : AAReturnedFromReturnedValues(IRP) {} /// See AbstractAttribute::trackStatistics() void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } }; /// Align attribute for function argument. 
-struct AAAlignArgument final : AAArgumentFromCallSiteArguments { +struct AAAlignArgument final + : AAArgumentFromCallSiteArguments { AAAlignArgument(const IRPosition &IRP) - : AAArgumentFromCallSiteArguments(IRP) {} + : AAArgumentFromCallSiteArguments(IRP) {} /// See AbstractAttribute::trackStatistics() void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } @@ -2271,8 +2248,8 @@ if (!LivenessAA) LivenessAA = - getAAFor(AA, IRPosition::function(*CtxI->getFunction())); - if (!LivenessAA || !LivenessAA->isAssumedDead(CtxI)) + &getAAFor(AA, IRPosition::function(*CtxI->getFunction())); + if (!LivenessAA->isAssumedDead(CtxI)) return false; // TODO: Do not track dependences automatically but add it here as only a @@ -2303,11 +2280,11 @@ Instruction *I = cast(U.getUser()); Function *Caller = I->getFunction(); - auto *LivenessAA = + const auto &LivenessAA = getAAFor(QueryingAA, IRPosition::function(*Caller)); // Skip dead calls. - if (LivenessAA && LivenessAA->isAssumedDead(I)) + if (LivenessAA.isAssumedDead(I)) continue; CallSite CS(U.getUser()); @@ -2348,10 +2325,10 @@ // and liveness information. const IRPosition &QueryIRP = IRPosition::function_scope(IRP); const auto &AARetVal = getAAFor(QueryingAA, QueryIRP); - if (!AARetVal || !AARetVal->getState().isValidState()) + if (!AARetVal.getState().isValidState()) return false; - return AARetVal->checkForAllReturnedValuesAndReturnInsts(Pred); + return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred); } bool Attributor::checkForAllReturnedValues( @@ -2365,10 +2342,10 @@ const IRPosition &QueryIRP = IRPosition::function_scope(IRP); const auto &AARetVal = getAAFor(QueryingAA, QueryIRP); - if (!AARetVal || !AARetVal->getState().isValidState()) + if (!AARetVal.getState().isValidState()) return false; - return AARetVal->checkForAllReturnedValuesAndReturnInsts( + return AARetVal.checkForAllReturnedValuesAndReturnInsts( [&](Value &RV, const SmallPtrSetImpl &) { return Pred(RV); }); @@ -2392,7 +2369,7 @@ for (unsigned Opcode : Opcodes) { for (Instruction *I : OpcodeInstMap[Opcode]) { // Skip dead instructions. - if (LivenessAA && LivenessAA->isAssumedDead(I)) + if (LivenessAA.isAssumedDead(I)) continue; if (!Pred(*I)) @@ -2418,7 +2395,7 @@ for (Instruction *I : InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) { // Skip dead instructions. - if (LivenessAA && LivenessAA->isAssumedDead(I)) + if (LivenessAA.isAssumedDead(I)) continue; if (!Pred(*I)) @@ -2429,9 +2406,9 @@ } ChangeStatus Attributor::run() { - // Initialize all abstract attributes. - for (AbstractAttribute *AA : AllAbstractAttributes) - AA->initialize(*this); + // Initialize all abstract attributes, allow new ones to be created. + for (unsigned u = 0; u < AllAbstractAttributes.size(); u++) + AllAbstractAttributes[u]->initialize(*this); LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized " << AllAbstractAttributes.size() @@ -2447,6 +2424,8 @@ Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end()); do { + // Remember the size to determine new attributes. + size_t NumAAs = AllAbstractAttributes.size(); LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter << ", Worklist size: " << Worklist.size() << "\n"); @@ -2472,8 +2451,15 @@ Worklist.clear(); Worklist.insert(ChangedAAs.begin(), ChangedAAs.end()); + // Add attributes to the worklist that have been created in the last + // iteration. 
+ Worklist.insert(AllAbstractAttributes.begin() + NumAAs, + AllAbstractAttributes.end()); + } while (!Worklist.empty() && ++IterationCounter < MaxFixpointIterations); + size_t NumFinalAAs = AllAbstractAttributes.size(); + LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: " << IterationCounter << "/" << MaxFixpointIterations << " iterations\n"); @@ -2566,6 +2552,9 @@ NumAttributesManifested += NumManifested; NumAttributesValidFixpoint += NumAtFixpoint; + assert( + NumFinalAAs == AllAbstractAttributes.size() && + "Expected the final number of abstract attributes to remain unchanged!"); return ManifestChange; } @@ -2578,8 +2567,8 @@ /// /// \returns The created abstract argument, or nullptr if none was created. template -static AAType *checkAndRegisterAA(const IRPosition &IRP, Attributor &A, - DenseSet *Whitelist) { +static const AAType *checkAndRegisterAA(const IRPosition &IRP, Attributor &A, + DenseSet *Whitelist) { if (Whitelist && !Whitelist->count(&AAType::ID)) return nullptr; @@ -2853,6 +2842,71 @@ const char AADereferenceable::ID = 0; const char AAAlign::ID = 0; +// Macro magic to create the static generator function for attributes that +// follow the naming scheme. + +#define SWITCH_PK_INV(CLASS, PK, POS_NAME) \ + case IRPosition::PK: \ + llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!"); + +#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \ + case IRPosition::PK: \ + AA = new CLASS##SUFFIX(IRP); \ + break; + +#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ + CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ + CLASS *AA = nullptr; \ + switch (IRP.getPositionKind()) { \ + SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ + SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ + SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ + SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ + SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ + SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ + SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ + SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ + } \ + AA->initialize(A); \ + return *AA; \ + } + +#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ + CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ + CLASS *AA = nullptr; \ + switch (IRP.getPositionKind()) { \ + SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ + SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \ + SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ + SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ + SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ + SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ + SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ + SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ + } \ + AA->initialize(A); \ + return *AA; \ + } + +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync) +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree) +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse) +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn) +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn) +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead) +CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues) + +CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull) +CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias) 
+CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable) +CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign) + +#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION +#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION +#undef SWITCH_PK_CREATE +#undef SWITCH_PK_INV + INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor", "Deduce and propagate attributes", false, false) INITIALIZE_PASS_END(AttributorLegacyPass, "attributor", Index: llvm/trunk/test/Transforms/FunctionAttrs/align.ll =================================================================== --- llvm/trunk/test/Transforms/FunctionAttrs/align.ll +++ llvm/trunk/test/Transforms/FunctionAttrs/align.ll @@ -37,19 +37,13 @@ declare align 8 i32* @align8() -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; alignment for the return value here. -; define align 8 i32* @test5_1() -; ATTRIBUTOR: define i32* @test5_1() +; ATTRIBUTOR: define align 8 i32* @test5_1() define i32* @test5_1() { %ret = tail call align 8 i32* @unknown() ret i32* %ret } -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; alignment for the return value here. -; define align 8 i32* @test5_2() -; ATTRIBUTOR: define i32* @test5_2() +; ATTRIBUTOR: define align 8 i32* @test5_2() define i32* @test5_2() { %ret = tail call i32* @align8() ret i32* %ret @@ -89,10 +83,7 @@ ; Function Attrs: nounwind readnone ssp uwtable define internal i8* @f1(i8* readnone %0) local_unnamed_addr #0 { -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; alignment for the return value here. -; define internal nonnull align 8 i8* @f1(i8* nonnull readnone align 8 %0) -; ATTRIBUTOR: define internal i8* @f1(i8* nonnull readnone align 8 dereferenceable(1) %0) +; ATTRIBUTOR: define internal nonnull align 8 dereferenceable(1) i8* @f1(i8* nonnull readnone align 8 dereferenceable(1) %0) %2 = icmp eq i8* %0, null br i1 %2, label %3, label %5 @@ -108,10 +99,7 @@ ; Function Attrs: nounwind readnone ssp uwtable define internal i8* @f2(i8* readnone %0) local_unnamed_addr #0 { -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; alignment for the return value here. -; define internal nonnull align 8 i8* @f2(i8* nonnull readnone align 8 %0) -; ATTRIBUTOR: define internal i8* @f2(i8* nonnull readnone align 8 dereferenceable(1) %0) +; ATTRIBUTOR: define internal nonnull align 8 dereferenceable(1) i8* @f2(i8* nonnull readnone align 8 dereferenceable(1) %0) %2 = icmp eq i8* %0, null br i1 %2, label %5, label %3 @@ -133,10 +121,7 @@ ; Function Attrs: nounwind readnone ssp uwtable define internal i8* @f3(i8* readnone %0) local_unnamed_addr #0 { -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; alignment for the return value here. 
-; define internal nonnull align 8 i8* @f3(i8* nonnull readnone align 16 %0) -; ATTRIBUTOR: define internal i8* @f3(i8* nonnull readnone align 16 dereferenceable(1) %0) +; ATTRIBUTOR: define internal nonnull align 8 dereferenceable(1) i8* @f3(i8* nonnull readnone align 16 dereferenceable(1) %0) %2 = icmp eq i8* %0, null br i1 %2, label %3, label %5 Index: llvm/trunk/test/Transforms/FunctionAttrs/arg_returned.ll =================================================================== --- llvm/trunk/test/Transforms/FunctionAttrs/arg_returned.ll +++ llvm/trunk/test/Transforms/FunctionAttrs/arg_returned.ll @@ -806,5 +806,8 @@ ; BOTH-DAG: attributes #{{[0-9]*}} = { noinline nounwind uwtable } ; BOTH-DAG: attributes #{{[0-9]*}} = { nofree noinline nosync nounwind readnone uwtable willreturn } ; BOTH-DAG: attributes #{{[0-9]*}} = { nofree noinline nosync nounwind uwtable willreturn } +; BOTH-DAG: attributes #{{[0-9]*}} = { nofree nosync willreturn } +; BOTH-DAG: attributes #{{[0-9]*}} = { nofree nosync } +; BOTH-DAG: attributes #{{[0-9]*}} = { nofree noreturn nosync } ; BOTH-DAG: attributes #{{[0-9]*}} = { noreturn } ; BOTH-NOT: attributes # Index: llvm/trunk/test/Transforms/FunctionAttrs/dereferenceable.ll =================================================================== --- llvm/trunk/test/Transforms/FunctionAttrs/dereferenceable.ll +++ llvm/trunk/test/Transforms/FunctionAttrs/dereferenceable.ll @@ -22,23 +22,20 @@ ; TEST 3 ; GEP inbounds define i32* @test3_1(i32* dereferenceable(8) %0) local_unnamed_addr { -; define nonnull dereferenceable(4) i32* @test3_1(i32* nonnull dereferenceable(8) %0) -; ATTRIBUTOR: define i32* @test3_1(i32* nonnull dereferenceable(8) %0) +; ATTRIBUTOR: define nonnull dereferenceable(4) i32* @test3_1(i32* nonnull dereferenceable(8) %0) %ret = getelementptr inbounds i32, i32* %0, i64 1 ret i32* %ret } define i32* @test3_2(i32* dereferenceable_or_null(32) %0) local_unnamed_addr { ; FIXME: Argument should be mark dereferenceable because of GEP `inbounds`. -; define nonnull dereferenceable(16) i32* @test3_2(i32* dereferenceable_or_null(32) %0) -; ATTRIBUTOR: define i32* @test3_2(i32* dereferenceable_or_null(32) %0) +; ATTRIBUTOR: define nonnull dereferenceable(16) i32* @test3_2(i32* dereferenceable_or_null(32) %0) %ret = getelementptr inbounds i32, i32* %0, i64 4 ret i32* %ret } define i32* @test3_3(i32* dereferenceable(8) %0, i32* dereferenceable(16) %1, i1 %2) local_unnamed_addr { -; define nonnull dereferenceable(4) i32* @test3_3(i32* nonnull dereferenceable(8) %0, i32* nonnull dereferenceable(16) %1, i1 %2) local_unnamed_addr -; ATTRIBUTOR: define i32* @test3_3(i32* nonnull dereferenceable(8) %0, i32* nonnull dereferenceable(16) %1, i1 %2) local_unnamed_addr +; ATTRIBUTOR: define nonnull dereferenceable(4) i32* @test3_3(i32* nonnull dereferenceable(8) %0, i32* nonnull dereferenceable(16) %1, i1 %2) local_unnamed_addr %ret1 = getelementptr inbounds i32, i32* %0, i64 1 %ret2 = getelementptr inbounds i32, i32* %1, i64 2 %ret = select i1 %2, i32* %ret1, i32* %ret2 Index: llvm/trunk/test/Transforms/FunctionAttrs/noalias_returned.ll =================================================================== --- llvm/trunk/test/Transforms/FunctionAttrs/noalias_returned.ll +++ llvm/trunk/test/Transforms/FunctionAttrs/noalias_returned.ll @@ -79,19 +79,13 @@ ; TEST 5 ; Returning global pointer. Should not be noalias. -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; alignment for the return value here. 
-; define nonnull align 8 dereferenceable(8) i8** @getter() -; CHECK: define i8** @getter() +; CHECK: define nonnull align 8 dereferenceable(8) i8** @getter() define i8** @getter() { ret i8** @G } -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; alignment for the return value here. ; Returning global pointer. Should not be noalias. -; define nonnull align 8 dereferenceable(8) i8** @calle1() -; CHECK: define i8** @calle1() +; CHECK: define nonnull align 8 dereferenceable(8) i8** @calle1() define i8** @calle1(){ %1 = call i8** @getter() ret i8** %1 Index: llvm/trunk/test/Transforms/FunctionAttrs/nonnull.ll =================================================================== --- llvm/trunk/test/Transforms/FunctionAttrs/nonnull.ll +++ llvm/trunk/test/Transforms/FunctionAttrs/nonnull.ll @@ -8,18 +8,13 @@ ; Return a pointer trivially nonnull (call return attribute) define i8* @test1() { -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. -; FNATTR: define nonnull i8* @test1 -; ATTRIBUTOR: define i8* @test1 +; BOTH: define nonnull i8* @test1 %ret = call i8* @ret_nonnull() ret i8* %ret } ; Return a pointer trivially nonnull (argument attribute) define i8* @test2(i8* nonnull %p) { -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. ; BOTH: define nonnull i8* @test2 ret i8* %p } @@ -38,10 +33,7 @@ } define i8* @test3(i1 %c) { -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. -; FNATTR: define nonnull i8* @test3 -; ATTRIBUTOR: define i8* @test3 +; BOTH: define nonnull i8* @test3 call i8* @scc_binder(i1 %c) %ret = call i8* @ret_nonnull() ret i8* %ret @@ -87,10 +79,7 @@ ; Local analysis, but going through a self recursive phi define i8* @test6() { entry: -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. -; FNATTR: define nonnull i8* @test6 -; ATTRIBUTOR: define i8* @test6 +; BOTH: define nonnull i8* @test6 %ret = call i8* @ret_nonnull() br label %loop loop: @@ -106,10 +95,7 @@ ret i8* %b } -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. -; FNATTR: define nonnull i8* @test8 -; ATTRIBUTOR: define i8* @test8 +; BOTH: define nonnull i8* @test8 define i8* @test8(i8* %a) { %b = getelementptr inbounds i8, i8* %a, i64 1 ret i8* %b @@ -193,7 +179,7 @@ define internal i32* @f1(i32* %arg) { ; FIXME: missing nonnull It should be nonnull @f1(i32* nonnull %arg) -; ATTRIBUTOR: define internal i32* @f1(i32* %arg) +; ATTRIBUTOR: define internal nonnull i32* @f1(i32* %arg) bb: %tmp = icmp eq i32* %arg, null @@ -212,7 +198,7 @@ bb6: ; preds = %bb1 ; FIXME: missing nonnull. It should be @f2(i32* nonnull %arg) -; ATTRIBUTOR: %tmp7 = tail call i32* @f2(i32* %arg) +; ATTRIBUTOR: %tmp7 = tail call nonnull i32* @f2(i32* %arg) %tmp7 = tail call i32* @f2(i32* %arg) ret i32* %tmp7 @@ -223,11 +209,11 @@ define internal i32* @f2(i32* %arg) { ; FIXME: missing nonnull. It should be nonnull @f2(i32* nonnull %arg) -; ATTRIBUTOR: define internal i32* @f2(i32* %arg) +; ATTRIBUTOR: define internal nonnull i32* @f2(i32* %arg) bb: ; FIXME: missing nonnull. 
It should be @f1(i32* nonnull %arg) -; ATTRIBUTOR: %tmp = tail call i32* @f1(i32* %arg) +; ATTRIBUTOR: %tmp = tail call nonnull i32* @f1(i32* %arg) %tmp = tail call i32* @f1(i32* %arg) ret i32* %tmp } @@ -443,10 +429,7 @@ unreachable } -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. -; FNATTR: define nonnull i32* @gep1( -; ATTRIBUTOR: define i32* @gep1( +; BOTH: define nonnull i32* @gep1( define i32* @gep1(i32* %p) { %q = getelementptr inbounds i32, i32* %p, i32 1 ret i32* %q @@ -465,10 +448,7 @@ ret i32 addrspace(3)* %q } -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. -; FNATTR: define internal nonnull i32* @g2() -; ATTRIBUTOR: define internal i32* @g2() +; BOTH: define internal nonnull i32* @g2() define internal i32* @g2() { ret i32* inttoptr (i64 4 to i32*) } Index: llvm/trunk/test/Transforms/FunctionAttrs/nosync.ll =================================================================== --- llvm/trunk/test/Transforms/FunctionAttrs/nosync.ll +++ llvm/trunk/test/Transforms/FunctionAttrs/nosync.ll @@ -27,11 +27,8 @@ ; FNATTR: Function Attrs: norecurse nounwind optsize readnone ssp uwtable ; FNATTR-NEXT: define nonnull i32* @foo(%struct.ST* readnone %s) -; FIXME: Until we have "on-demand" attribute generation we do not determine the -; return value properties. ; ATTRIBUTOR: Function Attrs: nofree nosync nounwind optsize readnone ssp uwtable -; define nonnull i32* @foo(%struct.ST* %s) -; ATTRIBUTOR-NEXT: define i32* @foo(%struct.ST* %s) +; ATTRIBUTOR-NEXT: define nonnull i32* @foo(%struct.ST* %s) define i32* @foo(%struct.ST* %s) nounwind uwtable readnone optsize ssp { entry: %arrayidx = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 1, i32 2, i32 1, i64 5, i64 13 Index: llvm/trunk/test/Transforms/FunctionAttrs/read_write_returned_arguments_scc.ll =================================================================== --- llvm/trunk/test/Transforms/FunctionAttrs/read_write_returned_arguments_scc.ll +++ llvm/trunk/test/Transforms/FunctionAttrs/read_write_returned_arguments_scc.ll @@ -161,4 +161,5 @@ ; CHECK-NOT: attributes # ; CHECK: attributes #{{.*}} = { nofree nosync nounwind } ; CHECK: attributes #{{.*}} = { nofree norecurse nosync nounwind } +; CHECK: attributes #{{.*}} = { nosync } ; CHECK-NOT: attributes #