diff --git a/bolt/lib/Passes/TailDuplication.cpp b/bolt/lib/Passes/TailDuplication.cpp --- a/bolt/lib/Passes/TailDuplication.cpp +++ b/bolt/lib/Passes/TailDuplication.cpp @@ -606,7 +606,7 @@ if (BlocksToDuplicate.empty()) continue; - // Apply the the duplication + // Apply the duplication ModifiedFunction = true; DuplicationsDynamicCount += BB->getExecutionCount(); auto DuplicatedBlocks = duplicateBlocks(*BB, BlocksToDuplicate); diff --git a/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp b/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp --- a/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp +++ b/clang-tools-extra/clang-tidy/fuchsia/MultipleInheritanceCheck.cpp @@ -95,7 +95,7 @@ void MultipleInheritanceCheck::check(const MatchFinder::MatchResult &Result) { if (const auto *D = Result.Nodes.getNodeAs("decl")) { - // Check against map to see if if the class inherits from multiple + // Check against map to see if the class inherits from multiple // concrete classes unsigned NumConcrete = 0; for (const auto &I : D->bases()) { diff --git a/clang-tools-extra/clangd/Headers.cpp b/clang-tools-extra/clangd/Headers.cpp --- a/clang-tools-extra/clangd/Headers.cpp +++ b/clang-tools-extra/clangd/Headers.cpp @@ -164,7 +164,7 @@ LastPragmaKeepInMainFileLine = SM.getLineNumber(SM.getMainFileID(), Offset) - 1; } else { - // Memorize headers that that have export pragmas in them. Include Cleaner + // Memorize headers that have export pragmas in them. Include Cleaner // does not support them properly yet, so they will be not marked as // unused. // FIXME: Once IncludeCleaner supports export pragmas, remove this. 
diff --git a/clang-tools-extra/clangd/Protocol.h b/clang-tools-extra/clangd/Protocol.h --- a/clang-tools-extra/clangd/Protocol.h +++ b/clang-tools-extra/clangd/Protocol.h @@ -1700,7 +1700,7 @@ bool fromJSON(const llvm::json::Value &Params, SemanticTokensDeltaParams &R, llvm::json::Path); -/// Describes a a replacement of a contiguous range of semanticTokens. +/// Describes a replacement of a contiguous range of semanticTokens. struct SemanticTokensEdit { // LSP specifies `start` and `deleteCount` which are relative to the array // encoding of the previous tokens. diff --git a/clang-tools-extra/clangd/Selection.cpp b/clang-tools-extra/clangd/Selection.cpp --- a/clang-tools-extra/clangd/Selection.cpp +++ b/clang-tools-extra/clangd/Selection.cpp @@ -395,7 +395,7 @@ // Implausible if upperbound(Tok) < First. if (auto Offset = LastAffectedToken(Tok.location())) return *Offset < First; - // A prefix of the expanded tokens may be from an an implicit + // A prefix of the expanded tokens may be from an implicit // inclusion (e.g. preamble patch, or command-line -include). return true; }); diff --git a/clang-tools-extra/clangd/TUScheduler.cpp b/clang-tools-extra/clangd/TUScheduler.cpp --- a/clang-tools-extra/clangd/TUScheduler.cpp +++ b/clang-tools-extra/clangd/TUScheduler.cpp @@ -245,7 +245,7 @@ /// threads, remove()s mostly from the main thread, and get() from ASTWorker. /// Writes are rare and reads are cheap, so we don't expect much contention. class TUScheduler::HeaderIncluderCache { - // We should be be a little careful how we store the include graph of open + // We should be a little careful how we store the include graph of open // files, as each can have a large number of transitive headers. // This representation is O(unique transitive source files). 
llvm::BumpPtrAllocator Arena; diff --git a/clang-tools-extra/clangd/index/CanonicalIncludes.h b/clang-tools-extra/clangd/index/CanonicalIncludes.h --- a/clang-tools-extra/clangd/index/CanonicalIncludes.h +++ b/clang-tools-extra/clangd/index/CanonicalIncludes.h @@ -42,7 +42,7 @@ /// Returns the overridden include for symbol with \p QualifiedName, or "". llvm::StringRef mapSymbol(llvm::StringRef QualifiedName) const; - /// Returns the overridden include for for files in \p Header, or "". + /// Returns the overridden include for files in \p Header, or "". llvm::StringRef mapHeader(FileEntryRef Header) const; /// Adds mapping for system headers and some special symbols (e.g. STL symbols diff --git a/clang-tools-extra/clangd/index/dex/Iterator.cpp b/clang-tools-extra/clangd/index/dex/Iterator.cpp --- a/clang-tools-extra/clangd/index/dex/Iterator.cpp +++ b/clang-tools-extra/clangd/index/dex/Iterator.cpp @@ -34,7 +34,7 @@ // When children are sorted by the estimateSize(), sync() calls are more // effective. Each sync() starts with the first child and makes sure all // children point to the same element. If any child is "above" the previous - // ones, the algorithm resets and and advances the children to the next + // ones, the algorithm resets and advances the children to the next // highest element starting from the front. When child iterators in the // beginning have smaller estimated size, the sync() will have less restarts // and become more effective. diff --git a/clang-tools-extra/clangd/support/ThreadCrashReporter.cpp b/clang-tools-extra/clangd/support/ThreadCrashReporter.cpp --- a/clang-tools-extra/clangd/support/ThreadCrashReporter.cpp +++ b/clang-tools-extra/clangd/support/ThreadCrashReporter.cpp @@ -38,7 +38,7 @@ this->Next = CurrentReporter; CurrentReporter = this; // Don't reorder subsequent operations: whatever comes after might crash and - // we want the the crash handler to see the reporter values we just set. 
+ // we want the crash handler to see the reporter values we just set. std::atomic_signal_fence(std::memory_order_seq_cst); } @@ -46,7 +46,7 @@ assert(CurrentReporter == this); CurrentReporter = this->Next; // Don't reorder subsequent operations: whatever comes after might crash and - // we want the the crash handler to see the reporter values we just set. + // we want the crash handler to see the reporter values we just set. std::atomic_signal_fence(std::memory_order_seq_cst); } diff --git a/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp b/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp --- a/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp +++ b/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp @@ -891,7 +891,7 @@ << "Didn't expect new diagnostics when adding a/foo.h"; }); - // Forcing the reload should should cause a rebuild. + // Forcing the reload should cause a rebuild. Inputs.ForceRebuild = true; updateWithDiags( S, Source, Inputs, WantDiagnostics::Yes, diff --git a/clang-tools-extra/clangd/unittests/TestTU.cpp b/clang-tools-extra/clangd/unittests/TestTU.cpp --- a/clang-tools-extra/clangd/unittests/TestTU.cpp +++ b/clang-tools-extra/clangd/unittests/TestTU.cpp @@ -54,7 +54,7 @@ Argv.push_back("-include"); Argv.push_back(ImplicitHeaderGuard ? ImportThunk : FullHeaderName); // ms-compatibility changes the meaning of #import. - // The default is OS-dependent (on on windows), ensure it's off. + // The default is OS-dependent (on for windows), ensure it's off. 
if (ImplicitHeaderGuard) Inputs.CompileCommand.CommandLine.push_back("-fno-ms-compatibility"); } diff --git a/clang-tools-extra/clangd/unittests/remote/MarshallingTests.cpp b/clang-tools-extra/clangd/unittests/remote/MarshallingTests.cpp --- a/clang-tools-extra/clangd/unittests/remote/MarshallingTests.cpp +++ b/clang-tools-extra/clangd/unittests/remote/MarshallingTests.cpp @@ -438,7 +438,7 @@ auto RelativePath = ProtobufMarshaller.uriToRelativePath( testPathURI("remote/project/lib/File.cpp", Strings)); ASSERT_TRUE(bool(RelativePath)); - // RemoteIndexRoot has to be be a prefix of the file path. + // RemoteIndexRoot has to be a prefix of the file path. Marshaller WrongMarshaller( /*RemoteIndexRoot=*/testPath("remote/other/project/"), /*LocalIndexRoot=*/""); diff --git a/clang-tools-extra/pp-trace/PPCallbacksTracker.cpp b/clang-tools-extra/pp-trace/PPCallbacksTracker.cpp --- a/clang-tools-extra/pp-trace/PPCallbacksTracker.cpp +++ b/clang-tools-extra/pp-trace/PPCallbacksTracker.cpp @@ -601,7 +601,7 @@ llvm::raw_string_ostream SS(Str); SS << "["; - // Each argument is is a series of contiguous Tokens, terminated by a eof. + // Each argument is a series of contiguous Tokens, terminated by an eof. // Go through each argument printing tokens until we reach eof. for (unsigned I = 0; I < Value->getNumMacroArguments(); ++I) { const Token *Current = Value->getUnexpArgument(I); diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/make-unique-inaccessible-ctors.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/make-unique-inaccessible-ctors.cpp --- a/clang-tools-extra/test/clang-tidy/checkers/modernize/make-unique-inaccessible-ctors.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/make-unique-inaccessible-ctors.cpp @@ -62,7 +62,7 @@ // "new NoCopyMoveCtor{}" is processed differently in C++14/17 and C++20: // * In C++14/17, it is recognized as aggregate initialization, // no fixes will be generated although the generated fix is compilable. 
- // * In C++20, it is is recognized as default constructor initialization + // * In C++20, it is recognized as default constructor initialization // (similar to "new NoCopyMoveCtor()"), the check will emit the fix and // the fix is correct. auto PNoCopyMoveCtor = std::unique_ptr(new NoCopyMoveCtor{}); diff --git a/clang/docs/DataFlowSanitizerDesign.rst b/clang/docs/DataFlowSanitizerDesign.rst --- a/clang/docs/DataFlowSanitizerDesign.rst +++ b/clang/docs/DataFlowSanitizerDesign.rst @@ -51,7 +51,7 @@ /// Retrieves the label associated with the data at the given address. dfsan_label dfsan_read_label(const void *addr, size_t size); - /// Returns whether the given label label contains the label elem. + /// Returns whether the given label contains the label elem. int dfsan_has_label(dfsan_label label, dfsan_label elem); /// Computes the union of \c l1 and \c l2, resulting in a union label. diff --git a/clang/include/clang/AST/ASTConsumer.h b/clang/include/clang/AST/ASTConsumer.h --- a/clang/include/clang/AST/ASTConsumer.h +++ b/clang/include/clang/AST/ASTConsumer.h @@ -76,7 +76,7 @@ virtual void HandleTagDeclRequiredDefinition(const TagDecl *D) {} /// Invoked when a function is implicitly instantiated. - /// Note that at this point point it does not have a body, its body is + /// Note that at this point it does not have a body, its body is /// instantiated at the end of the translation unit and passed to /// HandleTopLevelDecl. virtual void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D) {} diff --git a/clang/include/clang/AST/DeclCXX.h b/clang/include/clang/AST/DeclCXX.h --- a/clang/include/clang/AST/DeclCXX.h +++ b/clang/include/clang/AST/DeclCXX.h @@ -1172,7 +1172,7 @@ /// Determine whether this class has a pure virtual function. 
/// - /// The class is is abstract per (C++ [class.abstract]p2) if it declares + /// The class is abstract per (C++ [class.abstract]p2) if it declares /// a pure virtual function or inherits a pure virtual function that is /// not overridden. bool isAbstract() const { return data().Abstract; } diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h --- a/clang/include/clang/AST/DeclTemplate.h +++ b/clang/include/clang/AST/DeclTemplate.h @@ -274,8 +274,7 @@ /// /// This operation assumes that the input argument list outlives it. /// This takes the list as a pointer to avoid looking like a copy - /// constructor, since this really really isn't safe to use that - /// way. + /// constructor, since this really isn't safe to use that way. explicit TemplateArgumentList(const TemplateArgumentList *Other) : Arguments(Other->data()), NumArguments(Other->size()) {} diff --git a/clang/include/clang/AST/Expr.h b/clang/include/clang/AST/Expr.h --- a/clang/include/clang/AST/Expr.h +++ b/clang/include/clang/AST/Expr.h @@ -1982,7 +1982,7 @@ LFunction, // Same as Function, but as wide string. FuncDName, FuncSig, - LFuncSig, // Same as FuncSig, but as as wide string + LFuncSig, // Same as FuncSig, but as wide string PrettyFunction, /// The same as PrettyFunction, except that the /// 'virtual' keyword is omitted for virtual member functions. diff --git a/clang/include/clang/AST/ExternalASTMerger.h b/clang/include/clang/AST/ExternalASTMerger.h --- a/clang/include/clang/AST/ExternalASTMerger.h +++ b/clang/include/clang/AST/ExternalASTMerger.h @@ -118,7 +118,7 @@ /// Asks all connected ASTImporters if any of them imported the given /// declaration. If any ASTImporter did import the given declaration, /// then this function returns the declaration that D was imported from. - /// Returns nullptr if no ASTImporter did import import D. + /// Returns nullptr if no ASTImporter did import D. 
Decl *FindOriginalDecl(Decl *D); /// Add a set of ASTContexts as possible origins. diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h --- a/clang/include/clang/AST/StmtOpenMP.h +++ b/clang/include/clang/AST/StmtOpenMP.h @@ -980,7 +980,7 @@ /// Return the number of loops generated by this loop transformation. unsigned getNumGeneratedLoops() { return NumGeneratedLoops; } - /// Get the de-sugared statements after after the loop transformation. + /// Get the de-sugared statements after the loop transformation. /// /// Might be nullptr if either the directive generates no loops and is handled /// directly in CodeGen, or resolving a template-dependence context is @@ -2873,7 +2873,7 @@ /// /// \param C AST context. /// \param NumClauses Number of clauses. - /// \param IsStandalone true, if the the standalone directive is created. + /// \param IsStandalone true, if the standalone directive is created. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, diff --git a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h --- a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h +++ b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h @@ -464,7 +464,7 @@ /// restricts the node types for \p Kind. DynTypedMatcher dynCastTo(const ASTNodeKind Kind) const; - /// Return a matcher that that points to the same implementation, but sets the + /// Return a matcher that points to the same implementation, but sets the /// traversal kind. /// /// If the traversal kind is already set, then \c TK overrides it. diff --git a/clang/include/clang/Analysis/CFG.h b/clang/include/clang/Analysis/CFG.h --- a/clang/include/clang/Analysis/CFG.h +++ b/clang/include/clang/Analysis/CFG.h @@ -264,7 +264,7 @@ }; /// Represents the point where a loop ends. 
-/// This element is is only produced when building the CFG for the static +/// This element is only produced when building the CFG for the static /// analyzer and hidden behind the 'cfg-loopexit' analyzer config flag. /// /// Note: a loop exit element can be reached even when the loop body was never diff --git a/clang/include/clang/Analysis/CloneDetection.h b/clang/include/clang/Analysis/CloneDetection.h --- a/clang/include/clang/Analysis/CloneDetection.h +++ b/clang/include/clang/Analysis/CloneDetection.h @@ -260,7 +260,7 @@ /// /// Clones that aren't type II clones are moved into separate clone groups. /// In contrast to the RecursiveCloneTypeIIHashConstraint, all clones in a clone -/// group are guaranteed to be be type II clones of each other, but it is too +/// group are guaranteed to be type II clones of each other, but it is too /// slow to efficiently handle large amounts of clones. class RecursiveCloneTypeIIVerifyConstraint { public: diff --git a/clang/include/clang/Analysis/PathDiagnostic.h b/clang/include/clang/Analysis/PathDiagnostic.h --- a/clang/include/clang/Analysis/PathDiagnostic.h +++ b/clang/include/clang/Analysis/PathDiagnostic.h @@ -73,7 +73,7 @@ bool ShouldSerializeStats = false; /// If the consumer intends to produce multiple output files, should it - /// use a pseudo-random file name name or a human-readable file name. + /// use a pseudo-random file name or a human-readable file name. bool ShouldWriteVerboseReportFilename = false; /// Whether the consumer should treat consumed diagnostics as hard errors. diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def --- a/clang/include/clang/Basic/CodeGenOptions.def +++ b/clang/include/clang/Basic/CodeGenOptions.def @@ -333,7 +333,7 @@ CODEGENOPT(DebugStrictDwarf, 1, 1) ///< Whether or not to use strict DWARF info. CODEGENOPT(EnableAssignmentTracking, 1,0) ///< Enable the Assignment Tracking - ///< debug info feature feature. 
+ ///< debug info feature. CODEGENOPT(DebugColumnInfo, 1, 0) ///< Whether or not to use column information ///< in debug info. diff --git a/clang/include/clang/Basic/LangStandards.def b/clang/include/clang/Basic/LangStandards.def --- a/clang/include/clang/Basic/LangStandards.def +++ b/clang/include/clang/Basic/LangStandards.def @@ -17,7 +17,7 @@ /// \param LANG - The Language for which this is a standard. /// \param DESC - A short description of the standard. /// \param FEATURES - The standard features as flags, these are enums from the -/// clang::frontend namespace, which is assumed to be be available. +/// clang::frontend namespace, which is assumed to be available. /// LANGSTANDARD_ALIAS(IDENT, ALIAS) /// \param IDENT - The name of the standard as a C++ identifier. diff --git a/clang/include/clang/Basic/Sarif.h b/clang/include/clang/Basic/Sarif.h --- a/clang/include/clang/Basic/Sarif.h +++ b/clang/include/clang/Basic/Sarif.h @@ -372,7 +372,7 @@ /// attributes. However, it requires an ordering among certain method calls: /// /// 1. Because every SARIF document must contain at least 1 \c run, callers -/// must ensure that \ref SarifDocumentWriter::createRun is is called before +/// must ensure that \ref SarifDocumentWriter::createRun is called before /// any other methods. /// 2. If SarifDocumentWriter::endRun is called, callers MUST call /// SarifDocumentWriter::createRun, before invoking any of the result diff --git a/clang/include/clang/Basic/Specifiers.h b/clang/include/clang/Basic/Specifiers.h --- a/clang/include/clang/Basic/Specifiers.h +++ b/clang/include/clang/Basic/Specifiers.h @@ -31,7 +31,7 @@ /// Define the kind of constexpr specifier. enum class ConstexprSpecKind { Unspecified, Constexpr, Consteval, Constinit }; - /// In an if statement, this denotes whether the the statement is + /// In an if statement, this denotes whether the statement is /// a constexpr or consteval if statement. 
enum class IfStatementKind : unsigned { Ordinary, diff --git a/clang/include/clang/CrossTU/CrossTranslationUnit.h b/clang/include/clang/CrossTU/CrossTranslationUnit.h --- a/clang/include/clang/CrossTU/CrossTranslationUnit.h +++ b/clang/include/clang/CrossTU/CrossTranslationUnit.h @@ -101,7 +101,7 @@ using InvocationListTy = llvm::StringMap>; /// Parse the YAML formatted invocation list file content \p FileContent. -/// The format is expected to be a mapping from from absolute source file +/// The format is expected to be a mapping from absolute source file /// paths in the filesystem to a list of command-line parts, which /// constitute the invocation needed to compile that file. That invocation /// will be used to produce the AST of the TU. diff --git a/clang/include/clang/Sema/CodeCompleteConsumer.h b/clang/include/clang/Sema/CodeCompleteConsumer.h --- a/clang/include/clang/Sema/CodeCompleteConsumer.h +++ b/clang/include/clang/Sema/CodeCompleteConsumer.h @@ -429,7 +429,7 @@ } }; -/// Get string representation of \p Kind, useful for for debugging. +/// Get string representation of \p Kind, useful for debugging. llvm::StringRef getCompletionKindString(CodeCompletionContext::Kind Kind); /// A "string" used to describe how code completion can diff --git a/clang/include/clang/Sema/ParsedAttr.h b/clang/include/clang/Sema/ParsedAttr.h --- a/clang/include/clang/Sema/ParsedAttr.h +++ b/clang/include/clang/Sema/ParsedAttr.h @@ -432,7 +432,7 @@ return *getTrailingObjects(); } - /// The property data immediately follows the object is is mutually exclusive + /// The property data immediately follows the object and is mutually exclusive /// with arguments. 
detail::PropertyData &getPropertyDataBuffer() { assert(IsProperty); diff --git a/clang/include/clang/Sema/ScopeInfo.h b/clang/include/clang/Sema/ScopeInfo.h --- a/clang/include/clang/Sema/ScopeInfo.h +++ b/clang/include/clang/Sema/ScopeInfo.h @@ -883,7 +883,7 @@ /// This is specifically useful for generic lambdas or /// lambdas within a potentially evaluated-if-used context. /// If an enclosing variable is named in an expression of a lambda nested - /// within a generic lambda, we don't always know know whether the variable + /// within a generic lambda, we don't always know whether the variable /// will truly be odr-used (i.e. need to be captured) by that nested lambda, /// until its instantiation. But we still need to capture it in the /// enclosing lambda if all intervening lambdas can capture the variable. diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -8267,7 +8267,7 @@ SmallVectorImpl &CanonicalConverted, CheckTemplateArgumentKind CTAK); - /// Check that the given template arguments can be be provided to + /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h @@ -360,13 +360,13 @@ void processSwitch(SwitchNodeBuilder& builder); /// Called by CoreEngine. Used to notify checkers that processing a - /// function has begun. Called for both inlined and and top-level functions. + /// function has begun. Called for both inlined and top-level functions. 
void processBeginOfFunction(NodeBuilderContext &BC, ExplodedNode *Pred, ExplodedNodeSet &Dst, const BlockEdge &L); /// Called by CoreEngine. Used to notify checkers that processing a - /// function has ended. Called for both inlined and and top-level functions. + /// function has ended. Called for both inlined and top-level functions. void processEndOfFunction(NodeBuilderContext& BC, ExplodedNode *Pred, const ReturnStmt *RS = nullptr); diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h --- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h +++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h @@ -271,7 +271,7 @@ /// A virtual file system optimized for the dependency discovery. /// -/// It is primarily designed to work with source files whose contents was was +/// It is primarily designed to work with source files whose contents was /// preprocessed to remove any tokens that are unlikely to affect the dependency /// computation. /// diff --git a/clang/include/clang/Tooling/Refactoring/Extract/Extract.h b/clang/include/clang/Tooling/Refactoring/Extract/Extract.h --- a/clang/include/clang/Tooling/Refactoring/Extract/Extract.h +++ b/clang/include/clang/Tooling/Refactoring/Extract/Extract.h @@ -22,7 +22,7 @@ /// Initiates the extract function refactoring operation. /// /// \param Code The selected set of statements. - /// \param DeclName The name name of the extract function. If None, + /// \param DeclName The name of the extract function. If None, /// "extracted" is used. 
static Expected initiate(RefactoringRuleContext &Context, CodeRangeASTSelection Code, diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -6520,7 +6520,7 @@ if (!X->getFriendObjectKind() || !Y->getFriendObjectKind()) return false; - // If the the two functions share lexical declaration context, they are not in + // If the two functions share lexical declaration context, they are not in // separate instantations, and thus in the same scope. if (X->getLexicalDeclContext() == Y->getLexicalDeclContext()) return false; diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp --- a/clang/lib/AST/ASTDiagnostic.cpp +++ b/clang/lib/AST/ASTDiagnostic.cpp @@ -538,7 +538,7 @@ bool ShowColor; /// FromTemplateType - When single type printing is selected, this is the - /// type to be be printed. When tree printing is selected, this type will + /// type to be printed. When tree printing is selected, this type will /// show up first in the tree. QualType FromTemplateType; diff --git a/clang/lib/Basic/DiagnosticIDs.cpp b/clang/lib/Basic/DiagnosticIDs.cpp --- a/clang/lib/Basic/DiagnosticIDs.cpp +++ b/clang/lib/Basic/DiagnosticIDs.cpp @@ -546,7 +546,7 @@ if (Result == diag::Severity::Ignored) return Result; - // Honor -w: this disables all messages which which are not Error/Fatal by + // Honor -w: this disables all messages which are not Error/Fatal by // default (disregarding attempts to upgrade severity from Warning to Error), // as well as disabling all messages which are currently mapped to Warning // (whether by default or downgraded from Error via e.g. -Wno-error or #pragma diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp --- a/clang/lib/Basic/SourceManager.cpp +++ b/clang/lib/Basic/SourceManager.cpp @@ -797,7 +797,7 @@ // most newly created FileID. // LessIndex - This is the lower bound of the range that we're searching. 
- // We know that the offset corresponding to the FileID is is less than + // We know that the offset corresponding to the FileID is less than // SLocOffset. unsigned LessIndex = 0; // upper bound of the search range. diff --git a/clang/lib/CodeGen/CGCUDARuntime.h b/clang/lib/CodeGen/CGCUDARuntime.h --- a/clang/lib/CodeGen/CGCUDARuntime.h +++ b/clang/lib/CodeGen/CGCUDARuntime.h @@ -55,7 +55,7 @@ /// The kind flag for an offloading entry. enum OffloadEntryKindFlag : uint32_t { /// Mark the entry as a global entry. This indicates the presense of a - /// kernel if the size size field is zero and a variable otherwise. + /// kernel if the size field is zero and a variable otherwise. OffloadGlobalEntry = 0x0, /// Mark the entry as a managed global variable. OffloadGlobalManagedEntry = 0x1, diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp --- a/clang/lib/CodeGen/CGClass.cpp +++ b/clang/lib/CodeGen/CGClass.cpp @@ -2583,7 +2583,7 @@ llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true) ->getPointerTo(ProgAS) ->getPointerTo(GlobalsAS); - // vtable field is is derived from `this` pointer, therefore they should be in + // vtable field is derived from `this` pointer, therefore they should be in // the same addr space. Note that this might not be LLVM address space 0. VTableField = Builder.CreateElementBitCast(VTableField, VTablePtrTy); VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy); diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp --- a/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/clang/lib/CodeGen/CGDeclCXX.cpp @@ -740,7 +740,7 @@ CodeGenFunction(*this).GenerateCXXGlobalInitFunc( Fn, ModuleInits, ConstantAddress(Guard, Int8Ty, GuardAlign)); // We allow for the case that a module object is added to a linked binary - // without a specific call to the the initializer. This also ensure that + // without a specific call to the initializer. 
This also ensure that // implementation partition initializers are called when the partition // is not imported as an interface. AddGlobalCtor(Fn); diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h --- a/clang/lib/CodeGen/CGOpenMPRuntime.h +++ b/clang/lib/CodeGen/CGOpenMPRuntime.h @@ -560,7 +560,7 @@ /// metadata. void loadOffloadInfoMetadata(); - /// Start scanning from statement \a S and and emit all target regions + /// Start scanning from statement \a S and emit all target regions /// found along the way. /// \param S Starting statement. /// \param ParentName Name of the function declaration that is being scanned. diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -2231,7 +2231,7 @@ /// Emit the unified return block, trying to avoid its emission when /// possible. /// \return The debug location of the user written return statement if the - /// return block is is avoided. + /// return block is avoided. llvm::DebugLoc EmitReturnBlock(); /// FinishFunction - Complete IR generation of the current function. It is @@ -2884,7 +2884,7 @@ AggValueSlot::Overlap_t Overlap, SourceLocation Loc, bool NewPointerIsChecked); - /// Emit assumption load for all bases. Requires to be be called only on + /// Emit assumption load for all bases. Requires to be called only on /// most-derived class and not under construction of the object. void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This); diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp --- a/clang/lib/CodeGen/CodeGenTBAA.cpp +++ b/clang/lib/CodeGen/CodeGenTBAA.cpp @@ -338,7 +338,7 @@ using TBAAStructField = llvm::MDBuilder::TBAAStructField; SmallVector Fields; if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) { - // Handle C++ base classes. Non-virtual bases can treated a a kind of + // Handle C++ base classes. 
Non-virtual bases can be treated as a kind of // field. Virtual bases are more complex and omitted, but avoid an // incomplete view for NewStructPathTBAA. if (CodeGenOpts.NewStructPathTBAA && CXXRD->getNumVBases() != 0) diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp --- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp +++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -289,7 +289,7 @@ CodeGenFunction::VPtr Vptr) override; /// Don't initialize vptrs if dynamic class - /// is marked with with the 'novtable' attribute. + /// is marked with the 'novtable' attribute. bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { return !VTableClass->hasAttr(); } diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp --- a/clang/lib/CodeGen/TargetInfo.cpp +++ b/clang/lib/CodeGen/TargetInfo.cpp @@ -2618,7 +2618,7 @@ llvm::StringMap CalleeMap; unsigned ArgIndex = 0; - // We need to loop through the actual call arguments rather than the the + // We need to loop through the actual call arguments rather than the // function's parameters, in case this variadic. for (const CallArg &Arg : Args) { // The "avx" feature changes how vectors >128 in size are passed. "avx512f" @@ -10305,7 +10305,7 @@ void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str, bool IsRecursive) { if (!ID || IncompleteUsedCount) - return; // No key or it is is an incomplete sub-type so don't add. + return; // No key or it is an incomplete sub-type so don't add. Entry &E = Map[ID]; if (IsRecursive && !E.Str.empty()) { assert(E.State==Recursive && E.Str.size() == Str.size() && diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -2447,7 +2447,7 @@ // they can be influenced by linker flags the clang driver might not // understand. 
// Examples: - // - `clang-cl main.cc ole32.lib` in a a non-MSVC shell will make the driver + // - `clang-cl main.cc ole32.lib` in a non-MSVC shell will make the driver // module look for an MSVC installation in the registry. (We could ask // the MSVCToolChain object if it can find `ole32.lib`, but the logic to // look in the registry might move into lld-link in the future so that diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp --- a/clang/lib/Driver/ToolChains/Darwin.cpp +++ b/clang/lib/Driver/ToolChains/Darwin.cpp @@ -44,7 +44,7 @@ // The matching this routine does is fairly pointless, since it is neither the // complete architecture list, nor a reasonable subset. The problem is that - // historically the driver driver accepts this and also ties its -march= + // historically the driver accepts this and also ties its -march= // handling to the architecture name, so we need to be careful before removing // support for it. @@ -59,7 +59,7 @@ .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4", llvm::Triple::x86) .Cases("x86_64", "x86_64h", llvm::Triple::x86_64) - // This is derived from the driver driver. + // This is derived from the driver. .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm) .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm) .Cases("armv7s", "xscale", llvm::Triple::arm) @@ -2662,7 +2662,7 @@ } // Add the arch options based on the particular spelling of -arch, to match - // how the driver driver works. + // how the driver works. if (!BoundArch.empty()) { StringRef Name = BoundArch; const Option MCpu = Opts.getOption(options::OPT_mcpu_EQ); diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -84,7 +84,7 @@ RenderExtraToolArgs(JA, CmdArgs); - // If using a driver driver, force the arch. + // If using a driver, force the arch. 
if (getToolChain().getTriple().isOSDarwin()) { CmdArgs.push_back("-arch"); CmdArgs.push_back( diff --git a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp --- a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp +++ b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp @@ -637,7 +637,7 @@ serializeMembers(Record, Category->Methods); serializeMembers(Record, Category->Properties); - // Surface the protocols of the the category to the interface. + // Surface the protocols of the category to the interface. for (const auto &Protocol : Category->Protocols) serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol); } diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ -313,7 +313,7 @@ } // Infer the role of the l_paren based on the previous token if we haven't - // detected one one yet. + // detected one yet. if (PrevNonComment && OpeningParen.is(TT_Unknown)) { if (PrevNonComment->is(tok::kw___attribute)) { OpeningParen.setType(TT_AttributeParen); @@ -4896,7 +4896,7 @@ // // instead, even if it is longer by one line. // - // Note that this allows allows the "{" to go over the column limit + // Note that this allows the "{" to go over the column limit // when the column limit is just between ":" and "{", but that does // not happen too often and alternative formattings in this case are // not much better. diff --git a/clang/lib/Format/WhitespaceManager.h b/clang/lib/Format/WhitespaceManager.h --- a/clang/lib/Format/WhitespaceManager.h +++ b/clang/lib/Format/WhitespaceManager.h @@ -199,7 +199,7 @@ SmallVector CellCounts; unsigned InitialSpaces = 0; - // Determine if every row in the the array + // Determine if every row in the array // has the same number of columns. 
bool isRectangular() const { if (CellCounts.empty()) diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h --- a/clang/lib/Headers/smmintrin.h +++ b/clang/lib/Headers/smmintrin.h @@ -818,7 +818,7 @@ /// parameter, is copied to the result. /// \param N /// Specifies which bits from operand \a Y will be copied, which bits in the -/// result they will be be copied to, and which bits in the result will be +/// result they will be copied to, and which bits in the result will be /// cleared. The following assignments are made: \n /// Bits [7:6] specify the bits to copy from operand \a Y: \n /// 00: Selects bits [31:0] from operand \a Y. \n diff --git a/clang/lib/Lex/MacroInfo.cpp b/clang/lib/Lex/MacroInfo.cpp --- a/clang/lib/Lex/MacroInfo.cpp +++ b/clang/lib/Lex/MacroInfo.cpp @@ -118,7 +118,7 @@ if (A.getKind() != B.getKind()) return false; - // If this isn't the first first token, check that the whitespace and + // If this isn't the first token, check that the whitespace and // start-of-line characteristics match. if (i != 0 && (A.isAtStartOfLine() != B.isAtStartOfLine() || diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp --- a/clang/lib/Parse/ParseStmt.cpp +++ b/clang/lib/Parse/ParseStmt.cpp @@ -764,7 +764,7 @@ // otherwise in the same context as the labeled-statement. StmtCtx &= ~ParsedStmtContext::AllowDeclarationsInC; - // It is very very common for code to contain many case statements recursively + // It is very common for code to contain many case statements recursively // nested, as in (but usually without indentation): // case 1: // case 2: diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -5557,7 +5557,7 @@ SourceLocation CallSiteLoc) { assert((FDecl || Proto) && "Need a function declaration or prototype"); - // Already checked by by constant evaluator. + // Already checked by constant evaluator. 
if (S.isConstantEvaluated()) return; // Check the attributes attached to the method/function itself. @@ -17722,7 +17722,7 @@ } else ColumnsExpr = nullptr; - // If any any part of the result matrix type is still pending, just use + // If any part of the result matrix type is still pending, just use // Context.DependentTy, until all parts are resolved. if ((RowsExpr && RowsExpr->isTypeDependent()) || (ColumnsExpr && ColumnsExpr->isTypeDependent())) { diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp --- a/clang/lib/Sema/SemaCodeComplete.cpp +++ b/clang/lib/Sema/SemaCodeComplete.cpp @@ -8514,7 +8514,7 @@ CodeCompleter->getCodeCompletionTUInfo(), CodeCompletionContext::CCC_ObjCCategoryName); - // Add all of the categories that have have corresponding interface + // Add all of the categories that have corresponding interface // declarations in this class and any of its superclasses, except for // already-implemented categories in the class itself. llvm::SmallPtrSet CategoryNames; diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -19339,7 +19339,7 @@ } } - // If we have have an empty set of enumerators we still need one bit. + // If we have an empty set of enumerators we still need one bit. // From [dcl.enum]p8 // If the enumerator-list is empty, the values of the enumeration are as if // the enumeration had a single enumerator with value 0 diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -3150,7 +3150,7 @@ if (LO.CPlusPlus && !LO.CPlusPlus20) S.Diag(AL.getLoc(), diag::ext_cxx20_attr) << AL; - // Since this this is spelled [[nodiscard]], get the optional string + // Since this is spelled [[nodiscard]], get the optional string // literal. If in C++ mode, but not in C++2a mode, diagnose as an // extension. 
// FIXME: C2x should support this feature as well, even as an extension. diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -3100,7 +3100,7 @@ return; if (MD && !MD->isVirtual()) { - // If we have a non-virtual method, check if if hides a virtual method. + // If we have a non-virtual method, check if it hides a virtual method. // (In that case, it's most likely the method has the wrong type.) SmallVector OverloadedMethods; FindHiddenVirtualMethods(MD, OverloadedMethods); @@ -12981,7 +12981,7 @@ // Salient point: SS doesn't have to name a base class as long as // lookup only finds members from base classes. Therefore we can - // diagnose here only if we can prove that that can't happen, + // diagnose here only if we can prove that can't happen, // i.e. if the class hierarchies provably don't intersect. // TODO: it would be nice if "definitely valid" results were cached diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -8439,7 +8439,7 @@ return DRE->getFoundDecl(); if (auto *ME = dyn_cast(E)) return ME->getFoundDecl(); - // FIXME: Add any other expr types that could be be seen by the delayed typo + // FIXME: Add any other expr types that could be seen by the delayed typo // correction TreeTransform for which the corresponding TypoCorrection could // contain multiple decls. return nullptr; diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp --- a/clang/lib/Sema/SemaInit.cpp +++ b/clang/lib/Sema/SemaInit.cpp @@ -1528,8 +1528,8 @@ // As an extension, clang supports complex initializers, which initialize // a complex number component-wise. 
When an explicit initializer list for - // a complex number contains two two initializers, this extension kicks in: - // it exepcts the initializer list to contain two elements convertible to + // a complex number contains two initializers, this extension kicks in: + // it expects the initializer list to contain two elements convertible to // the element type of the complex type. The first element initializes // the real part, and the second element intitializes the imaginary part. diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -20969,8 +20969,8 @@ } // Pointer arithmetic is the only thing we expect to happen here so after we - // make sure the binary operator is a pointer type, the we only thing need - // to to is to visit the subtree that has the same type as root (so that we + // make sure the binary operator is a pointer type, the only thing we need + // to do is to visit the subtree that has the same type as root (so that we // know the other subtree is just an offset) Expr *LE = BO->getLHS()->IgnoreParenImpCasts(); Expr *RE = BO->getRHS()->IgnoreParenImpCasts(); @@ -21740,7 +21740,7 @@ /*WhereFoundClauseKind=*/OMPC_map); // Save the components and declaration to create the clause. For purposes of - // the clause creation, any component list that has has base 'this' uses + // the clause creation, any component list that has base 'this' uses // null as base declaration. MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1); MVLI.VarComponents.back().append(CurComponents.begin(), diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -10289,7 +10289,7 @@ } else if (Cand->NotValidBecauseConstraintExprHasError()) { // This candidate has constraint that we were unable to evaluate because // it referenced an expression that contained an error. 
Rather than fall - // back onto a potentially unintended candidate (made worse by by + // back onto a potentially unintended candidate (made worse by // subsuming constraints), treat this as 'no viable candidate'. Best = end(); return OR_No_Viable_Function; diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp --- a/clang/lib/Sema/SemaStmtAttr.cpp +++ b/clang/lib/Sema/SemaStmtAttr.cpp @@ -311,7 +311,7 @@ // come in two variants: a state form and a numeric form. The state form // selectively defaults/enables/disables the transformation for the loop // (for unroll, default indicates full unrolling rather than enabling the - // transformation). The numeric form form provides an integer hint (for + // transformation). The numeric form provides an integer hint (for // example, unroll count) to the transformer. The following array accumulates // the hints encountered while iterating through the attributes to check for // compatibility. diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp --- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -2268,7 +2268,7 @@ // In C++, the previous declaration we find might be a tag type // (class or enum). In this case, the new declaration will hide the - // tag type. Note that this does does not apply if we're declaring a + // tag type. Note that this does not apply if we're declaring a // typedef (C++ [dcl.typedef]p4). if (Previous.isSingleTagDecl()) Previous.clear(); @@ -2661,7 +2661,7 @@ // In C++, the previous declaration we find might be a tag type // (class or enum). In this case, the new declaration will hide the - // tag type. Note that this does does not apply if we're declaring a + // tag type. Note that this does not apply if we're declaring a // typedef (C++ [dcl.typedef]p4). 
if (Previous.isSingleTagDecl()) Previous.clear(); diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp --- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp @@ -1004,7 +1004,7 @@ return false; } -/// Returns true if the ID is a class in which which is known to have +/// Returns true if the ID is a class which is known to have /// a separate teardown lifecycle. In this case, -dealloc warnings /// about missing releases should be suppressed. bool ObjCDeallocChecker::classHasSeparateTeardown( diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp --- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp @@ -854,7 +854,7 @@ // If a variable is dead (is not referenced directly or indirectly after // some point), it will be removed from the Store before the end of its // actual lifetime. - // This means that that if the ownership status didn't change, CurrOwners + // This means that if the ownership status didn't change, CurrOwners // must be a superset of, but not necessarily equal to ExitOwners. return !llvm::set_is_subset(ExitOwners, CurrOwners); } diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp --- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp @@ -969,7 +969,7 @@ // Let's pick one of them at random (if there is something to pick from). 
AllocBindingToReport = AllVarBindings[0].first; - // Because 'AllocBindingToReport' is not the the same as + // Because 'AllocBindingToReport' is not the same as // 'AllocFirstBinding', we need to explain how the leaking object // got from one to another. // diff --git a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp --- a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp +++ b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp @@ -32,7 +32,7 @@ namespace { /// Emitsd minimal diagnostics (report message + notes) for the 'none' output -/// type to the standard error, or to to compliment many others. Emits detailed +/// type to the standard error, or to complement many others. Emits detailed /// diagnostics in textual format for the 'text' output type. class TextDiagnostics : public PathDiagnosticConsumer { PathDiagnosticConsumerOptions DiagOpts; diff --git a/clang/test/Analysis/lambdas.cpp b/clang/test/Analysis/lambdas.cpp --- a/clang/test/Analysis/lambdas.cpp +++ b/clang/test/Analysis/lambdas.cpp @@ -338,7 +338,7 @@ local1++; }; - // Don't treat as a dead store because local1 was was captured by reference. + // Don't treat as a dead store because local1 was captured by reference. local1 = 7; // no-warning lambda1(); @@ -349,7 +349,7 @@ local2++; // Implicit capture by reference }; - // Don't treat as a dead store because local2 was was captured by reference. + // Don't treat as a dead store because local2 was captured by reference. local2 = 7; // no-warning lambda2(); diff --git a/clang/test/Parser/declarators.c b/clang/test/Parser/declarators.c --- a/clang/test/Parser/declarators.c +++ b/clang/test/Parser/declarators.c @@ -60,7 +60,7 @@ return &b.y; // expected-warning {{incompatible pointer types returning 'int *' from a function with result type 'float *'}} } -struct xyz test8(void) { return a; } // a should be be marked invalid, no diag. 
+struct xyz test8(void) { return a; } // a should be marked invalid, no diag. // Verify that implicit int still works. diff --git a/clang/test/Sema/builtin-align.c b/clang/test/Sema/builtin-align.c --- a/clang/test/Sema/builtin-align.c +++ b/clang/test/Sema/builtin-align.c @@ -64,7 +64,7 @@ _Static_assert(check_same_type(void *, result_type_ptr), "Should return void*"); _Static_assert(check_same_type(int, result_type_int), "Should return int"); _Static_assert(check_same_type(long, result_type_long), "Should return long"); - // Check that we can use the alignment builtins on on array types (result should decay) + // Check that we can use the alignment builtins on array types (result should decay) _Static_assert(check_same_type(char *, result_type_char_array), "Using the builtins on an array should yield the decayed type"); #endif diff --git a/clang/test/SemaCXX/warn-thread-safety-parsing.cpp b/clang/test/SemaCXX/warn-thread-safety-parsing.cpp --- a/clang/test/SemaCXX/warn-thread-safety-parsing.cpp +++ b/clang/test/SemaCXX/warn-thread-safety-parsing.cpp @@ -1240,7 +1240,7 @@ int temp; void empty_back_edge() { - // Create a back edge to a block with with no statements + // Create a back edge to a block with no statements for (;;) { ++temp; if (temp > 10) break; diff --git a/clang/test/SemaTemplate/temp_arg_nontype.cpp b/clang/test/SemaTemplate/temp_arg_nontype.cpp --- a/clang/test/SemaTemplate/temp_arg_nontype.cpp +++ b/clang/test/SemaTemplate/temp_arg_nontype.cpp @@ -398,7 +398,7 @@ namespace partial_order_references { // FIXME: The standard does not appear to consider the second specialization - // to be more more specialized than the first! The problem is that deducing + // to be more specialized than the first! The problem is that deducing // an 'int&' parameter from an argument 'R' results in a type mismatch, // because the parameter has a reference type and the argument is an // expression and thus does not have reference type. 
We resolve this by diff --git a/clang/unittests/Analysis/CloneDetectionTest.cpp b/clang/unittests/Analysis/CloneDetectionTest.cpp --- a/clang/unittests/Analysis/CloneDetectionTest.cpp +++ b/clang/unittests/Analysis/CloneDetectionTest.cpp @@ -64,7 +64,7 @@ CloneDetectionVisitor Visitor(Detector); Visitor.TraverseTranslationUnitDecl(TU); - // Find clones with the usual settings, but but we want to filter out + // Find clones with the usual settings, but we want to filter out // all statements from functions which names start with "bar". std::vector CloneGroups; Detector.findClones(CloneGroups, NoBarFunctionConstraint(), diff --git a/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/clang/utils/TableGen/ClangSACheckersEmitter.cpp --- a/clang/utils/TableGen/ClangSACheckersEmitter.cpp +++ b/clang/utils/TableGen/ClangSACheckersEmitter.cpp @@ -219,7 +219,7 @@ // - DESCRIPTION // - DEFAULT: The default value for this option. // - // The full option can be specified in the command like like this: + // The full option can be specified in the command like this: // -analyzer-config PACKAGENAME:OPTIONNAME=VALUE OS << "\n" "#ifdef GET_PACKAGE_OPTIONS\n"; @@ -319,7 +319,7 @@ // - DESCRIPTION // - DEFAULT: The default value for this option. // - // The full option can be specified in the command like like this: + // The full option can be specified in the command like this: // -analyzer-config CHECKERNAME:OPTIONNAME=VALUE OS << "\n" "#ifdef GET_CHECKER_OPTIONS\n"; diff --git a/compiler-rt/include/sanitizer/dfsan_interface.h b/compiler-rt/include/sanitizer/dfsan_interface.h --- a/compiler-rt/include/sanitizer/dfsan_interface.h +++ b/compiler-rt/include/sanitizer/dfsan_interface.h @@ -62,7 +62,7 @@ /// from the address addr. dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size); -/// Returns whether the given label label contains the label elem. +/// Returns whether the given label contains the label elem. 
int dfsan_has_label(dfsan_label label, dfsan_label elem); /// Flushes the DFSan shadow, i.e. forgets about all labels currently associated diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -1158,7 +1158,7 @@ // This is necessary to prevent false positive leaks due to the AsanThread // holding the only live reference to a heap object. This can happen because // the `pthread_create()` interceptor doesn't wait for the child thread to - // start before returning and thus loosing the the only live reference to the + // start before returning and thus losing the only live reference to the // heap object on the stack. __asan::AsanThreadContext *atctx = diff --git a/compiler-rt/lib/builtins/arm/udivsi3.S b/compiler-rt/lib/builtins/arm/udivsi3.S --- a/compiler-rt/lib/builtins/arm/udivsi3.S +++ b/compiler-rt/lib/builtins/arm/udivsi3.S @@ -203,7 +203,7 @@ LOCAL_LABEL(block_skip_##shift) :; \ adcs r3, r3 // same as ((r3 << 1) | Carry). Carry is set if r0 >= r2. - // TODO: if current location counter is not not word aligned, we don't + // TODO: if current location counter is not word aligned, we don't // need the .p2align and nop // Label div0block must be word-aligned. First align block 31 .p2align 2 diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp --- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp +++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cpp @@ -208,7 +208,7 @@ char tinybuf[10]; trace.PrintTo(tinybuf, sizeof(tinybuf)); - // This the the truncation case. + // This is the truncation case. ASSERT_GT(actual_len, sizeof(tinybuf)); // The truncated contents should be a prefix of the full contents. 
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp --- a/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp +++ b/compiler-rt/lib/tsan/rtl-old/tsan_interceptors_posix.cpp @@ -2507,7 +2507,7 @@ // Copy act into sigactions[sig]. // Can't use struct copy, because compiler can emit call to memcpy. // Can't use internal_memcpy, because it copies byte-by-byte, - // and signal handler reads the handler concurrently. It it can read + // and signal handler reads the handler concurrently. It can read // some bytes from old value and some bytes from new value. // Use volatile to prevent insertion of memcpy. sigactions[sig].handler = diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp @@ -2526,7 +2526,7 @@ // Copy act into sigactions[sig]. // Can't use struct copy, because compiler can emit call to memcpy. // Can't use internal_memcpy, because it copies byte-by-byte, - // and signal handler reads the handler concurrently. It it can read + // and signal handler reads the handler concurrently. It can read // some bytes from old value and some bytes from new value. // Use volatile to prevent insertion of memcpy. sigactions[sig].handler = diff --git a/flang/include/flang/Lower/CallInterface.h b/flang/include/flang/Lower/CallInterface.h --- a/flang/include/flang/Lower/CallInterface.h +++ b/flang/include/flang/Lower/CallInterface.h @@ -106,7 +106,7 @@ /// Value means passed by value at the mlir level, it is not necessarily /// implied by Fortran Value attribute. Value, - /// ValueAttribute means dummy has the the Fortran VALUE attribute. + /// ValueAttribute means dummy has the Fortran VALUE attribute. 
BaseAddressValueAttribute, CharBoxValueAttribute, // BoxChar with VALUE // Passing a character procedure as a diff --git a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h --- a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h +++ b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h @@ -372,7 +372,7 @@ /// fir::runtime::RuntimeTableEntry, "_FortranASumReal4"))> /// ``` -/// These entries are then used to to generate the MLIR FunctionType that +/// These entries are then used to generate the MLIR FunctionType that /// correspond to the runtime function declaration in C++. #undef FirE #define FirE(L, I) (I < sizeof(L) / sizeof(*L) ? L[I] : 0) diff --git a/flang/include/flang/Optimizer/Builder/Runtime/Reduction.h b/flang/include/flang/Optimizer/Builder/Runtime/Reduction.h --- a/flang/include/flang/Optimizer/Builder/Runtime/Reduction.h +++ b/flang/include/flang/Optimizer/Builder/Runtime/Reduction.h @@ -123,7 +123,7 @@ mlir::Value arrayBox, mlir::Value maskBox); /// Generate call to `MinvalCharacter` intrinsic runtime routine. This is the -/// version that that handles 1 dimensional character arrays with no DIM +/// version that handles 1 dimensional character arrays with no DIM /// argument. void genMinvalChar(fir::FirOpBuilder &builder, mlir::Location loc, mlir::Value resultBox, mlir::Value arrayBox, diff --git a/flang/lib/Decimal/big-radix-floating-point.h b/flang/lib/Decimal/big-radix-floating-point.h --- a/flang/lib/Decimal/big-radix-floating-point.h +++ b/flang/lib/Decimal/big-radix-floating-point.h @@ -335,7 +335,7 @@ // Adds another number and then divides by two. // Assumes same exponent and sign. - // Returns true when the the result has effectively been rounded down. + // Returns true when the result has effectively been rounded down. 
bool Mean(const BigRadixFloatingPointNumber &); // Parses a floating-point number; leaves the pointer reference diff --git a/flang/lib/Evaluate/fold-integer.cpp b/flang/lib/Evaluate/fold-integer.cpp --- a/flang/lib/Evaluate/fold-integer.cpp +++ b/flang/lib/Evaluate/fold-integer.cpp @@ -324,7 +324,7 @@ if (mask) { if (auto scalarMask{mask->GetScalarValue()}) { // Convert into array in case of scalar MASK= (for - // MAXLOC/MINLOC/FINDLOC mask should be be conformable) + // MAXLOC/MINLOC/FINDLOC mask should be conformable) ConstantSubscript n{GetSize(array->shape())}; std::vector> mask_elements( n, Scalar{scalarMask.value()}); diff --git a/flang/lib/Lower/ConvertExpr.cpp b/flang/lib/Lower/ConvertExpr.cpp --- a/flang/lib/Lower/ConvertExpr.cpp +++ b/flang/lib/Lower/ConvertExpr.cpp @@ -4761,7 +4761,7 @@ } /// If there were temporaries created for this element evaluation, finalize - /// and deallocate the resources now. This should be done just prior the the + /// and deallocate the resources now. This should be done just prior to the /// fir::ResultOp at the end of the innermost loop. void finalizeElementCtx() { if (elementCtx) { diff --git a/flang/lib/Lower/IO.cpp b/flang/lib/Lower/IO.cpp --- a/flang/lib/Lower/IO.cpp +++ b/flang/lib/Lower/IO.cpp @@ -278,7 +278,7 @@ groupIsLocal = true; continue; } - // We know we have a global item. It it's not a pointer or allocatable, + // We know we have a global item. If it's not a pointer or allocatable, // create a static pointer to it. if (!IsAllocatableOrPointer(s)) { std::string mangleName = converter.mangleName(s) + ".desc"; diff --git a/flang/lib/Lower/IntrinsicCall.cpp b/flang/lib/Lower/IntrinsicCall.cpp --- a/flang/lib/Lower/IntrinsicCall.cpp +++ b/flang/lib/Lower/IntrinsicCall.cpp @@ -346,7 +346,7 @@ if (!absentDim && rank == 1) { // If dim argument is present and the array is rank 1, then the result is - // a scalar (since the the result is rank-1 or 0). + // a scalar (since the result is rank-1 or 0). 
// Therefore, we use a scalar result descriptor with Min/MaxlocDim(). mlir::Value dim = fir::getBase(args[1]); // Create mutable fir.box to be passed to the runtime for the result. diff --git a/flang/lib/Lower/OpenMP.cpp b/flang/lib/Lower/OpenMP.cpp --- a/flang/lib/Lower/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP.cpp @@ -246,7 +246,7 @@ } /// The COMMON block is a global structure. \p commonValue is the base address -/// of the the COMMON block. As the offset from the symbol \p sym, generate the +/// of the COMMON block. As the offset from the symbol \p sym, generate the /// COMMON block member value (commonValue + offset) for the symbol. /// FIXME: Share the code with `instantiateCommon` in ConvertVariable.cpp. static mlir::Value diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -2951,12 +2951,12 @@ /// Depending on the case condition type, one or several comparison and /// conditional branching can be generated. /// -/// A a point value case such as `case(4)`, a lower bound case such as +/// A point value case such as `case(4)`, a lower bound case such as /// `case(5:)` or an upper bound case such as `case(:3)` are converted to a /// simple comparison between the selector value and the constant value in the /// case. The block associated with the case condition is then executed if /// the comparison succeed otherwise it branch to the next block with the -/// comparison for the the next case conditon. +/// comparison for the next case condition. /// /// A closed interval case condition such as `case(7:10)` is converted with a /// first comparison and conditional branching for the lower bound. 
If diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp --- a/flang/lib/Semantics/expression.cpp +++ b/flang/lib/Semantics/expression.cpp @@ -2463,7 +2463,7 @@ template static const Symbol *AssumedTypePointerOrAllocatableDummy(const A &object) { // It is illegal for allocatable of pointer objects to be TYPE(*), but at that - // point it is is not guaranteed that it has been checked the object has + // point it is not guaranteed that it has been checked the object has // POINTER or ALLOCATABLE attribute, so do not assume nullptr can be directly // returned. return common::visit( diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp --- a/flang/lib/Semantics/resolve-names.cpp +++ b/flang/lib/Semantics/resolve-names.cpp @@ -695,7 +695,7 @@ std::vector *> equivalenceSets; // Names of all common block objects in the scope std::set commonBlockObjects; - // Info about about SAVE statements and attributes in current scope + // Info about SAVE statements and attributes in current scope struct { std::optional saveAll; // "SAVE" without entity list std::set entities; // names of entities with save attr diff --git a/libc/src/__support/FPUtil/generic/FMA.h b/libc/src/__support/FPUtil/generic/FMA.h --- a/libc/src/__support/FPUtil/generic/FMA.h +++ b/libc/src/__support/FPUtil/generic/FMA.h @@ -223,7 +223,7 @@ if (r_exp > 0) { // The result is normal. We will shift the mantissa to the right by // 63 - 52 = 11 bits (from the locations of the most significant bit). - // Then the rounding bit will correspond the the 11th bit, and the lowest + // Then the rounding bit will correspond the 11th bit, and the lowest // 10 bits are merged into sticky bits. 
round_bit = (result & 0x0400ULL) != 0; sticky_bits |= (result & 0x03ffULL) != 0; diff --git a/libc/src/__support/FPUtil/x86_64/FEnvImpl.h b/libc/src/__support/FPUtil/x86_64/FEnvImpl.h --- a/libc/src/__support/FPUtil/x86_64/FEnvImpl.h +++ b/libc/src/__support/FPUtil/x86_64/FEnvImpl.h @@ -241,7 +241,7 @@ // We set the status flag for exception one at a time and call the // fwait instruction to actually get the processor to raise the // exception by calling the exception handler. This scheme is per - // the description in in "8.6 X87 FPU EXCEPTION SYNCHRONIZATION" + // the description in "8.6 X87 FPU EXCEPTION SYNCHRONIZATION" // of the "Intel 64 and IA-32 Architectures Software Developer's // Manual, Vol 1". diff --git a/libc/src/stdio/printf_core/parser.h b/libc/src/stdio/printf_core/parser.h --- a/libc/src/stdio/printf_core/parser.h +++ b/libc/src/stdio/printf_core/parser.h @@ -137,7 +137,7 @@ // the ArgList can only return the next item in the list. This function is // used in index mode when the item that needs to be read is not the next one. - // It moves cur_args to the index requested so the the appropriate value may + // It moves cur_args to the index requested so the appropriate value may // be read. This may involve parsing the format string, and is in the worst // case an O(n^2) operation. void args_to_index(size_t index); diff --git a/libc/src/stdio/scanf_core/parser.h b/libc/src/stdio/scanf_core/parser.h --- a/libc/src/stdio/scanf_core/parser.h +++ b/libc/src/stdio/scanf_core/parser.h @@ -85,7 +85,7 @@ // the ArgList can only return the next item in the list. This function is // used in index mode when the item that needs to be read is not the next one. - // It moves cur_args to the index requested so the the appropriate value may + // It moves cur_args to the index requested so the appropriate value may // be read. This may involve parsing the format string, and is in the worst // case an O(n^2) operation. 
void args_to_index(size_t index); diff --git a/libc/test/src/math/RoundToIntegerTest.h b/libc/test/src/math/RoundToIntegerTest.h --- a/libc/test/src/math/RoundToIntegerTest.h +++ b/libc/test/src/math/RoundToIntegerTest.h @@ -111,7 +111,7 @@ test_one_input(func, F(1234.0), I(1234), false); test_one_input(func, F(-1234.0), I(-1234), false); - // The rest of this this function compares with an equivalent MPFR function + // The rest of this function compares with an equivalent MPFR function // which rounds floating point numbers to long values. There is no MPFR // function to round to long long or wider integer values. So, we will // the remaining tests only if the width of I less than equal to that of diff --git a/libcxx/include/__format/buffer.h b/libcxx/include/__format/buffer.h --- a/libcxx/include/__format/buffer.h +++ b/libcxx/include/__format/buffer.h @@ -87,7 +87,7 @@ _LIBCPP_HIDE_FROM_ABI void __copy(basic_string_view<_InCharT> __str) { // When the underlying iterator is a simple iterator the __capacity_ is // infinite. For a string or container back_inserter it isn't. This means - // adding a large string the the buffer can cause some overhead. In that + // adding a large string to the buffer can cause some overhead. In that // case a better approach could be: // - flush the buffer // - container.append(__str.begin(), __str.end()); diff --git a/libcxxabi/src/private_typeinfo.cpp b/libcxxabi/src/private_typeinfo.cpp --- a/libcxxabi/src/private_typeinfo.cpp +++ b/libcxxabi/src/private_typeinfo.cpp @@ -1013,7 +1013,7 @@ if (info->search_done) break; // If we just found a dst_type with a public path to (static_ptr, static_type), - // then the only reason to continue the search is to make sure sure + // then the only reason to continue the search is to make sure // no other dst_type points to (static_ptr, static_type). // If !diamond, then we don't need to search here. 
// if we just found a dst_type with a private path to (static_ptr, static_type), diff --git a/libunwind/src/FrameHeaderCache.hpp b/libunwind/src/FrameHeaderCache.hpp --- a/libunwind/src/FrameHeaderCache.hpp +++ b/libunwind/src/FrameHeaderCache.hpp @@ -41,7 +41,7 @@ // Can't depend on the C++ standard library in libunwind, so use an array to // allocate the entries, and two linked lists for ordering unused and recently - // used entries. FIXME: Would the the extra memory for a doubly-linked list + // used entries. FIXME: Would the extra memory for a doubly-linked list // be better than the runtime cost of traversing a very short singly-linked // list on a cache miss? The entries themselves are all small and consecutive, // so unlikely to cause page faults when following the pointers. The memory diff --git a/libunwind/src/UnwindRegistersRestore.S b/libunwind/src/UnwindRegistersRestore.S --- a/libunwind/src/UnwindRegistersRestore.S +++ b/libunwind/src/UnwindRegistersRestore.S @@ -649,7 +649,7 @@ ldr d30, [x0, #0x200] ldr d31, [x0, #0x208] - // Finally, restore sp. This must be done after the the last read from the + // Finally, restore sp. This must be done after the last read from the // context struct, because it is allocated on the stack, and an exception // could clobber the de-allocated portion of the stack after sp has been // restored. diff --git a/lld/COFF/DebugTypes.cpp b/lld/COFF/DebugTypes.cpp --- a/lld/COFF/DebugTypes.cpp +++ b/lld/COFF/DebugTypes.cpp @@ -823,7 +823,7 @@ if (!pchSrc) return; - // To compute ghashes of a /Yu object file, we need to build on the the + // To compute ghashes of a /Yu object file, we need to build on the // ghashes of the /Yc PCH object. After we are done hashing, discard the // ghashes from the PCH source so we don't unnecessarily try to deduplicate // them. 
diff --git a/lld/COFF/PDB.cpp b/lld/COFF/PDB.cpp --- a/lld/COFF/PDB.cpp +++ b/lld/COFF/PDB.cpp @@ -119,7 +119,7 @@ std::vector &stringTableFixups, BinaryStreamRef symData); - // Write all module symbols from all all live debug symbol subsections of the + // Write all module symbols from all live debug symbol subsections of the // given object file into the given stream writer. Error writeAllModuleSymbolRecords(ObjFile *file, BinaryStreamWriter &writer); diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp --- a/lld/ELF/SyntheticSections.cpp +++ b/lld/ELF/SyntheticSections.cpp @@ -3651,7 +3651,7 @@ void PPC64LongBranchTargetSection::writeTo(uint8_t *buf) { // If linking non-pic we have the final addresses of the targets and they get // written to the table directly. For pic the dynamic linker will allocate - // the section and fill it it. + // the section and fill it. if (config->isPic) return; diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp --- a/lld/MachO/Driver.cpp +++ b/lld/MachO/Driver.cpp @@ -1383,7 +1383,7 @@ uint64_t pagezeroSize = args::getHex(args, OPT_pagezero_size, 0); // ld64 does something really weird. It attempts to realign the value to the - // page size, but assumes the the page size is 4K. This doesn't work with + // page size, but assumes the page size is 4K. This doesn't work with // most of Apple's ARM64 devices, which use a page size of 16K. This means // that it will first 4K align it by rounding down, then round up to 16K. // This probably only happened because no one using this arg with anything diff --git a/lld/MachO/ICF.cpp b/lld/MachO/ICF.cpp --- a/lld/MachO/ICF.cpp +++ b/lld/MachO/ICF.cpp @@ -436,7 +436,7 @@ // Some sections have embedded addends that foil ICF's hashing / equality // checks. (We can ignore embedded addends when doing ICF because the same // information gets recorded in our Reloc structs.) 
We therefore create a - // mutable copy of the the section data and zero out the embedded addends + // mutable copy of the section data and zero out the embedded addends // before performing any hashing / equality checks. if (isFoldableWithAddendsRemoved) { // We have to do this copying serially as the BumpPtrAllocator is not diff --git a/lld/MachO/InputFiles.cpp b/lld/MachO/InputFiles.cpp --- a/lld/MachO/InputFiles.cpp +++ b/lld/MachO/InputFiles.cpp @@ -525,9 +525,9 @@ // so there is no need to merge opcode bits with address // bits. Therefore, it's easy and convenient to store addends in the // instruction-stream bytes that would otherwise contain zeroes. By - // contrast, RISC ISAs such as ARM64 mix opcode bits with with - // address bits so that bitwise arithmetic is necessary to extract - // and insert them. Storing addends in the instruction stream is + // contrast, RISC ISAs such as ARM64 mix opcode bits with address + // bits so that bitwise arithmetic is necessary to extract and + // insert them. Storing addends in the instruction stream is // possible, but inconvenient and more costly at link time. relocation_info relInfo = relInfos[i]; diff --git a/lld/MachO/UnwindInfoSection.cpp b/lld/MachO/UnwindInfoSection.cpp --- a/lld/MachO/UnwindInfoSection.cpp +++ b/lld/MachO/UnwindInfoSection.cpp @@ -262,7 +262,7 @@ // application provides its own personality function, it might be // referenced by an extern Defined symbol reloc, or a local section reloc. if (auto *defined = dyn_cast(s)) { - // XXX(vyng) This is a a special case for handling duplicate personality + // XXX(vyng) This is a special case for handling duplicate personality // symbols. Note that LD64's behavior is a bit different and it is // inconsistent with how symbol resolution usually work // @@ -573,7 +573,7 @@ // If this is not the final page, see if it's possible to fit more // entries by using the regular format. 
This can happen when there - // are many unique encodings, and we we saturated the local + // are many unique encodings, and we saturated the local // encoding table early. if (i < cuIndices.size() && page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) { diff --git a/lld/test/ELF/arm-fix-cortex-a8-toolarge.s b/lld/test/ELF/arm-fix-cortex-a8-toolarge.s --- a/lld/test/ELF/arm-fix-cortex-a8-toolarge.s +++ b/lld/test/ELF/arm-fix-cortex-a8-toolarge.s @@ -38,7 +38,7 @@ target2: .section .text.02, "ax", %progbits /// 32-bit Branch and link spans 2 4KiB regions, preceded by a 32-bit -/// non branch instruction, a patch will be be attempted. Unfortunately the +/// non branch instruction, a patch will be attempted. Unfortunately /// the BL cannot reach outside the section so we have to abort the patch. nop.w bl target2 diff --git a/lld/wasm/Writer.cpp b/lld/wasm/Writer.cpp --- a/lld/wasm/Writer.cpp +++ b/lld/wasm/Writer.cpp @@ -1084,7 +1084,7 @@ { raw_string_ostream os(bodyContent); // Initialize memory in a thread-safe manner. The thread that successfully - // increments the flag from 0 to 1 is is responsible for performing the + // increments the flag from 0 to 1 is responsible for performing the // memory initialization. Other threads go sleep on the flag until the // first thread finishing initializing memory, increments the flag to 2, // and wakes all the other threads. Once the flag has been set to 2, diff --git a/lldb/include/lldb/API/SBTrace.h b/lldb/include/lldb/API/SBTrace.h --- a/lldb/include/lldb/API/SBTrace.h +++ b/lldb/include/lldb/API/SBTrace.h @@ -41,7 +41,7 @@ SBTraceCursor CreateNewCursor(SBError &error, SBThread &thread); /// Save the trace to the specified directory, which will be created if - /// needed. This will also create a a file \a /trace.json with the + /// needed. This will also create a file \a /trace.json with the /// main properties of the trace session, along with others files which /// contain the actual trace data. 
The trace.json file can be used later as /// input for the "trace load" command to load the trace in LLDB, or for the diff --git a/lldb/include/lldb/Core/Module.h b/lldb/include/lldb/Core/Module.h --- a/lldb/include/lldb/Core/Module.h +++ b/lldb/include/lldb/Core/Module.h @@ -1069,7 +1069,7 @@ ModuleList::GetGlobalModuleListProperties().GetSymlinkMappings(); lldb::SectionListUP m_sections_up; ///< Unified section list for module that - /// is used by the ObjectFile and and + /// is used by the ObjectFile and /// ObjectFile instances for the debug info std::atomic m_did_load_objfile{false}; diff --git a/lldb/include/lldb/Expression/LLVMUserExpression.h b/lldb/include/lldb/Expression/LLVMUserExpression.h --- a/lldb/include/lldb/Expression/LLVMUserExpression.h +++ b/lldb/include/lldb/Expression/LLVMUserExpression.h @@ -40,7 +40,7 @@ static bool classof(const Expression *obj) { return obj->isA(&ID); } // The IRPasses struct is filled in by a runtime after an expression is - // compiled and can be used to to run fixups/analysis passes as required. + // compiled and can be used to run fixups/analysis passes as required. // EarlyPasses are run on the generated module before lldb runs its own IR // fixups and inserts instrumentation code/pointer checks. 
LatePasses are run // after the module has been processed by llvm, before the module is diff --git a/lldb/include/lldb/Interpreter/OptionValueProperties.h b/lldb/include/lldb/Interpreter/OptionValueProperties.h --- a/lldb/include/lldb/Interpreter/OptionValueProperties.h +++ b/lldb/include/lldb/Interpreter/OptionValueProperties.h @@ -86,7 +86,7 @@ bool will_modify, uint32_t idx) const; - // Property can be be a property path like + // Property can be a property path like // "target.process.extra-startup-command" virtual const Property *GetPropertyAtPath(const ExecutionContext *exe_ctx, bool will_modify, diff --git a/lldb/include/lldb/Symbol/CompilerType.h b/lldb/include/lldb/Symbol/CompilerType.h --- a/lldb/include/lldb/Symbol/CompilerType.h +++ b/lldb/include/lldb/Symbol/CompilerType.h @@ -324,7 +324,7 @@ uint64_t &language_flags) const; /// Lookup a child given a name. This function will match base class names and - /// member member names in "clang_type" only, not descendants. + /// member names in "clang_type" only, not descendants. uint32_t GetIndexOfChildWithName(const char *name, bool omit_empty_base_classes) const; diff --git a/lldb/include/lldb/Symbol/ObjectContainer.h b/lldb/include/lldb/Symbol/ObjectContainer.h --- a/lldb/include/lldb/Symbol/ObjectContainer.h +++ b/lldb/include/lldb/Symbol/ObjectContainer.h @@ -110,7 +110,7 @@ /// Selects an architecture in an object file. /// /// Object files that contain a single architecture should verify that the - /// specified \a arch matches the architecture in in object file and return + /// specified \a arch matches the architecture in the object file and return /// \b true or \b false accordingly. 
/// /// Object files that contain more than one architecture should attempt to diff --git a/lldb/include/lldb/Target/Process.h b/lldb/include/lldb/Target/Process.h --- a/lldb/include/lldb/Target/Process.h +++ b/lldb/include/lldb/Target/Process.h @@ -1201,7 +1201,7 @@ /// this function if the platform fails to identify the host OS version. The /// platform should be checked first in case you are running a simulator /// platform that might itself be running natively, but have different - /// heuristics for figuring out which OS is is emulating. + /// heuristics for figuring out which OS is emulating. /// /// \return /// Returns the version tuple of the host OS. In case of failure an empty @@ -1786,7 +1786,7 @@ /// /// \param[in] load_addr /// The load address to query the range_info for. May include non - /// address bits, these will be removed by the the ABI plugin if there is + /// address bits, these will be removed by the ABI plugin if there is /// one. /// /// \param[out] range_info diff --git a/lldb/include/lldb/Target/TargetList.h b/lldb/include/lldb/Target/TargetList.h --- a/lldb/include/lldb/Target/TargetList.h +++ b/lldb/include/lldb/Target/TargetList.h @@ -25,7 +25,7 @@ /// Constructor /// /// The constructor for the target list is private. Clients can - /// get ahold of of the one and only target list through the + /// get ahold of the one and only target list through the /// lldb_private::Debugger::GetSharedInstance().GetTargetList(). /// /// \see static TargetList& lldb_private::Debugger::GetTargetList(). diff --git a/lldb/include/lldb/Target/Trace.h b/lldb/include/lldb/Target/Trace.h --- a/lldb/include/lldb/Target/Trace.h +++ b/lldb/include/lldb/Target/Trace.h @@ -57,7 +57,7 @@ virtual void Dump(Stream *s) const = 0; /// Save the trace to the specified directory, which will be created if - /// needed. This will also create a a file \a /trace.json with the + /// needed. 
This will also create a file \a /trace.json with the /// main properties of the trace session, along with others files which /// contain the actual trace data. The trace.json file can be used later as /// input for the "trace load" command to load the trace in LLDB. diff --git a/lldb/include/lldb/Utility/RangeMap.h b/lldb/include/lldb/Utility/RangeMap.h --- a/lldb/include/lldb/Utility/RangeMap.h +++ b/lldb/include/lldb/Utility/RangeMap.h @@ -244,7 +244,7 @@ if (first_intersect == m_entries.end()) return; - // We we can combine at least one entry, then we make a new collection and + // If we can combine at least one entry, then we make a new collection and // populate it accordingly, and then swap it into place. auto pos = std::next(first_intersect); Collection minimal_ranges(m_entries.begin(), pos); @@ -501,7 +501,7 @@ } } - // We we can combine at least one entry, then we make a new collection and + // If we can combine at least one entry, then we make a new collection and // populate it accordingly, and then swap it into place. if (can_combine) { Collection minimal_ranges; diff --git a/lldb/source/Breakpoint/Breakpoint.cpp b/lldb/source/Breakpoint/Breakpoint.cpp --- a/lldb/source/Breakpoint/Breakpoint.cpp +++ b/lldb/source/Breakpoint/Breakpoint.cpp @@ -418,7 +418,7 @@ void Breakpoint::SetCallback(BreakpointHitCallback callback, void *baton, bool is_synchronous) { // The default "Baton" class will keep a copy of "baton" and won't free or - // delete it when it goes goes out of scope. + // delete it when it goes out of scope. 
m_options.SetCallback(callback, std::make_shared(baton), is_synchronous); diff --git a/lldb/source/Breakpoint/BreakpointLocation.cpp b/lldb/source/Breakpoint/BreakpointLocation.cpp --- a/lldb/source/Breakpoint/BreakpointLocation.cpp +++ b/lldb/source/Breakpoint/BreakpointLocation.cpp @@ -204,7 +204,7 @@ void BreakpointLocation::SetCallback(BreakpointHitCallback callback, void *baton, bool is_synchronous) { // The default "Baton" class will keep a copy of "baton" and won't free or - // delete it when it goes goes out of scope. + // delete it when it goes out of scope. GetLocationOptions().SetCallback( callback, std::make_shared(baton), is_synchronous); SendBreakpointLocationChangedEvent(eBreakpointEventTypeCommandChanged); diff --git a/lldb/source/Breakpoint/BreakpointSite.cpp b/lldb/source/Breakpoint/BreakpointSite.cpp --- a/lldb/source/Breakpoint/BreakpointSite.cpp +++ b/lldb/source/Breakpoint/BreakpointSite.cpp @@ -49,7 +49,7 @@ bool BreakpointSite::ShouldStop(StoppointCallbackContext *context) { m_hit_counter.Increment(); - // ShouldStop can do a lot of work, and might even come come back and hit + // ShouldStop can do a lot of work, and might even come back and hit // this breakpoint site again. So don't hold the m_owners_mutex the whole // while. Instead make a local copy of the collection and call ShouldStop on // the copy. diff --git a/lldb/source/Breakpoint/Watchpoint.cpp b/lldb/source/Breakpoint/Watchpoint.cpp --- a/lldb/source/Breakpoint/Watchpoint.cpp +++ b/lldb/source/Breakpoint/Watchpoint.cpp @@ -63,7 +63,7 @@ void Watchpoint::SetCallback(WatchpointHitCallback callback, void *baton, bool is_synchronous) { // The default "Baton" class will keep a copy of "baton" and won't free or - // delete it when it goes goes out of scope. + // delete it when it goes out of scope. 
m_options.SetCallback(callback, std::make_shared(baton), is_synchronous); diff --git a/lldb/source/Commands/CommandObjectMemory.cpp b/lldb/source/Commands/CommandObjectMemory.cpp --- a/lldb/source/Commands/CommandObjectMemory.cpp +++ b/lldb/source/Commands/CommandObjectMemory.cpp @@ -1807,7 +1807,7 @@ return false; } - // Is is important that we track the address used to request the region as + // It is important that we track the address used to request the region as // this will give the correct section name in the case that regions overlap. // On Windows we get mutliple regions that start at the same place but are // different sizes and refer to different sections. diff --git a/lldb/source/Core/DataFileCache.cpp b/lldb/source/Core/DataFileCache.cpp --- a/lldb/source/Core/DataFileCache.cpp +++ b/lldb/source/Core/DataFileCache.cpp @@ -284,7 +284,7 @@ size_t length_offset = encoder.GetByteSize(); encoder.AppendU32(0); // Total length of all strings which will be fixed up. size_t strtab_offset = encoder.GetByteSize(); - encoder.AppendU8(0); // Start the string table with with an empty string. + encoder.AppendU8(0); // Start the string table with an empty string. for (auto s: m_strings) { // Make sure all of the offsets match up with what we handed out! assert(m_string_to_offset.find(s)->second == diff --git a/lldb/source/Core/ModuleList.cpp b/lldb/source/Core/ModuleList.cpp --- a/lldb/source/Core/ModuleList.cpp +++ b/lldb/source/Core/ModuleList.cpp @@ -356,7 +356,7 @@ } size_t remove_count = 0; // Modules might hold shared pointers to other modules, so removing one - // module might make other other modules orphans. Keep removing modules until + // module might make other modules orphans. Keep removing modules until // there are no further modules that can be removed. 
bool made_progress = true; while (made_progress) { diff --git a/lldb/source/Host/macosx/objcxx/Host.mm b/lldb/source/Host/macosx/objcxx/Host.mm --- a/lldb/source/Host/macosx/objcxx/Host.mm +++ b/lldb/source/Host/macosx/objcxx/Host.mm @@ -805,7 +805,7 @@ if (copyRightStatus != errAuthorizationSuccess) { // Eventually when the commandline supports running as root and the user // is not - // logged in in the current audit session, we will need the trick in gdb + // logged in to the current audit session, we will need the trick in gdb // where // we ask the user to type in the root passwd in the terminal. error.SetError(2, eErrorTypeGeneric); diff --git a/lldb/source/Plugins/ABI/AArch64/ABIMacOSX_arm64.h b/lldb/source/Plugins/ABI/AArch64/ABIMacOSX_arm64.h --- a/lldb/source/Plugins/ABI/AArch64/ABIMacOSX_arm64.h +++ b/lldb/source/Plugins/ABI/AArch64/ABIMacOSX_arm64.h @@ -46,7 +46,7 @@ // in other environments there can be a large number of different functions // involved in async traps. bool CallFrameAddressIsValid(lldb::addr_t cfa) override { - // Make sure the stack call frame addresses are are 8 byte aligned + // Make sure the stack call frame addresses are 8 byte aligned if (cfa & (8ull - 1ull)) return false; // Not 8 byte aligned if (cfa == 0) diff --git a/lldb/source/Plugins/ABI/AArch64/ABISysV_arm64.h b/lldb/source/Plugins/ABI/AArch64/ABISysV_arm64.h --- a/lldb/source/Plugins/ABI/AArch64/ABISysV_arm64.h +++ b/lldb/source/Plugins/ABI/AArch64/ABISysV_arm64.h @@ -49,7 +49,7 @@ // in other environments there can be a large number of different functions // involved in async traps. 
bool CallFrameAddressIsValid(lldb::addr_t cfa) override { - // Make sure the stack call frame addresses are are 8 byte aligned + // Make sure the stack call frame addresses are 8 byte aligned if (cfa & (8ull - 1ull)) return false; // Not 8 byte aligned if (cfa == 0) diff --git a/lldb/source/Plugins/ABI/ARM/ABIMacOSX_arm.h b/lldb/source/Plugins/ABI/ARM/ABIMacOSX_arm.h --- a/lldb/source/Plugins/ABI/ARM/ABIMacOSX_arm.h +++ b/lldb/source/Plugins/ABI/ARM/ABIMacOSX_arm.h @@ -37,7 +37,7 @@ bool RegisterIsVolatile(const lldb_private::RegisterInfo *reg_info) override; bool CallFrameAddressIsValid(lldb::addr_t cfa) override { - // Make sure the stack call frame addresses are are 4 byte aligned + // Make sure the stack call frame addresses are 4 byte aligned if (cfa & (4ull - 1ull)) return false; // Not 4 byte aligned if (cfa == 0) diff --git a/lldb/source/Plugins/ABI/ARM/ABISysV_arm.h b/lldb/source/Plugins/ABI/ARM/ABISysV_arm.h --- a/lldb/source/Plugins/ABI/ARM/ABISysV_arm.h +++ b/lldb/source/Plugins/ABI/ARM/ABISysV_arm.h @@ -37,7 +37,7 @@ bool RegisterIsVolatile(const lldb_private::RegisterInfo *reg_info) override; bool CallFrameAddressIsValid(lldb::addr_t cfa) override { - // Make sure the stack call frame addresses are are 4 byte aligned + // Make sure the stack call frame addresses are 4 byte aligned if (cfa & (4ull - 1ull)) return false; // Not 4 byte aligned if (cfa == 0) diff --git a/lldb/source/Plugins/ABI/X86/ABIMacOSX_i386.h b/lldb/source/Plugins/ABI/X86/ABIMacOSX_i386.h --- a/lldb/source/Plugins/ABI/X86/ABIMacOSX_i386.h +++ b/lldb/source/Plugins/ABI/X86/ABIMacOSX_i386.h @@ -52,7 +52,7 @@ // If we were to enforce 16-byte alignment, we also need to relax to 4-byte // alignment for non-darwin i386 targets. 
bool CallFrameAddressIsValid(lldb::addr_t cfa) override { - // Make sure the stack call frame addresses are are 4 byte aligned + // Make sure the stack call frame addresses are 4 byte aligned if (cfa & (4ull - 1ull)) return false; // Not 4 byte aligned if (cfa == 0) diff --git a/lldb/source/Plugins/Architecture/Arm/ArchitectureArm.cpp b/lldb/source/Plugins/Architecture/Arm/ArchitectureArm.cpp --- a/lldb/source/Plugins/Architecture/Arm/ArchitectureArm.cpp +++ b/lldb/source/Plugins/Architecture/Arm/ArchitectureArm.cpp @@ -51,7 +51,7 @@ // stepping because the debugger stops regardless due to the BVR/BCR // triggering a stop. // - // It also means we can set breakpoints on instructions inside an an if/then + // It also means we can set breakpoints on instructions inside an if/then // block and correctly skip them if we use the BKPT instruction. The ARM and // Thumb BKPT instructions are unconditional even when executed in a Thumb IT // block. diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h @@ -52,7 +52,7 @@ /// struct so that it can be passed to the JITted version of the IR. /// /// Fourth and finally, it "dematerializes" the struct after the JITted code -/// has has executed, placing the new values back where it found the old ones. +/// has executed, placing the new values back where it found the old ones. 
class ClangExpressionDeclMap : public ClangASTSource { public: /// Constructor diff --git a/lldb/source/Plugins/Platform/MacOSX/PlatformDarwin.h b/lldb/source/Plugins/Platform/MacOSX/PlatformDarwin.h --- a/lldb/source/Plugins/Platform/MacOSX/PlatformDarwin.h +++ b/lldb/source/Plugins/Platform/MacOSX/PlatformDarwin.h @@ -138,7 +138,7 @@ uint64_t abort_cause; // unsigned int }; - /// Extract the `__crash_info` annotations from each of of the target's + /// Extract the `__crash_info` annotations from each of the target's /// modules. /// /// If the platform have a crashed processes with a `__crash_info` section, diff --git a/lldb/source/Plugins/Platform/Windows/PlatformWindows.cpp b/lldb/source/Plugins/Platform/Windows/PlatformWindows.cpp --- a/lldb/source/Plugins/Platform/Windows/PlatformWindows.cpp +++ b/lldb/source/Plugins/Platform/Windows/PlatformWindows.cpp @@ -464,7 +464,7 @@ // mechanisms to do it for us, because it doesn't have the special knowledge // required for setting up the background thread or passing the right flags. // - // Another problem is that that LLDB's standard model for debugging a process + // Another problem is that LLDB's standard model for debugging a process // is to first launch it, have it stop at the entry point, and then attach to // it. In Windows this doesn't quite work, you have to specify as an // argument to CreateProcess() that you're going to debug the process. So we diff --git a/lldb/source/Plugins/Process/Linux/NativeProcessLinux.cpp b/lldb/source/Plugins/Process/Linux/NativeProcessLinux.cpp --- a/lldb/source/Plugins/Process/Linux/NativeProcessLinux.cpp +++ b/lldb/source/Plugins/Process/Linux/NativeProcessLinux.cpp @@ -558,7 +558,7 @@ SetCurrentThreadID(main_thread->GetID()); main_thread->SetStoppedByExec(); - // Tell coordinator about about the "new" (since exec) stopped main thread. + // Tell coordinator about the "new" (since exec) stopped main thread. 
ThreadWasCreated(*main_thread); // Let our delegate know we have just exec'd. diff --git a/lldb/source/Plugins/Process/Linux/Perf.cpp b/lldb/source/Plugins/Process/Linux/Perf.cpp --- a/lldb/source/Plugins/Process/Linux/Perf.cpp +++ b/lldb/source/Plugins/Process/Linux/Perf.cpp @@ -228,7 +228,7 @@ uint64_t actual_data_head = data_head % data_size; // The buffer has wrapped, so we first the oldest chunk of data output.insert(output.end(), data.begin() + actual_data_head, data.end()); - // And we we read the most recent chunk of data + // And we read the most recent chunk of data output.insert(output.end(), data.begin(), data.begin() + actual_data_head); } else { // There's been no wrapping, so we just read linearly diff --git a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp --- a/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp +++ b/lldb/source/Plugins/Process/elf-core/ProcessElfCore.cpp @@ -131,7 +131,7 @@ m_core_aranges.Append(range_entry); } } - // Keep a separate map of permissions that that isn't coalesced so all ranges + // Keep a separate map of permissions that isn't coalesced so all ranges // are maintained. const uint32_t permissions = ((header.p_flags & llvm::ELF::PF_R) ? lldb::ePermissionsReadable : 0u) | diff --git a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp --- a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp +++ b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp @@ -2060,7 +2060,7 @@ switch (stop_type) { case 'T': case 'S': { - // This is a bit of a hack, but is is required. If we did exec, we need to + // This is a bit of a hack, but it is required. 
If we did exec, we need to // clear our thread lists and also know to rebuild our dynamic register // info before we lookup and threads and populate the expedited register // values so we need to know this right away so we can cleanup and update diff --git a/lldb/source/Plugins/Process/minidump/RegisterContextMinidump_x86_32.h b/lldb/source/Plugins/Process/minidump/RegisterContextMinidump_x86_32.h --- a/lldb/source/Plugins/Process/minidump/RegisterContextMinidump_x86_32.h +++ b/lldb/source/Plugins/Process/minidump/RegisterContextMinidump_x86_32.h @@ -99,7 +99,7 @@ // The next field is included with // MinidumpContext_x86_32_Flags::ExtendedRegisters - // It contains vector (MMX/SSE) registers. It it laid out in the + // It contains vector (MMX/SSE) registers. It is laid out in the // format used by the fxsave and fsrstor instructions, so it includes // a copy of the x87 floating-point registers as well. See FXSAVE in // "Intel Architecture Software Developer's Manual, Volume 2." diff --git a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp --- a/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp +++ b/lldb/source/Plugins/ScriptInterpreter/Python/PythonDataObjects.cpp @@ -1322,7 +1322,7 @@ FileSP file_sp; if (borrowed) { - // In this case we we don't need to retain the python + // In this case we don't need to retain the python // object at all. 
file_sp = std::make_shared(fd, options.get(), false); } else { diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp @@ -1030,7 +1030,7 @@ } } } else if (is_cxx_method) { - // Look at the parent of this DIE and see if is is a class or + // Look at the parent of this DIE and see if it is a class or // struct and see if this is actually a C++ method Type *class_type = dwarf->ResolveType(decl_ctx_die); if (class_type) { @@ -2972,7 +2972,7 @@ // in our AST. Clang will re-create those articial members and they would // otherwise just overlap in the layout with the FieldDecls we add here. // This needs to be done after updating FieldInfo which keeps track of where - // field start/end so we don't later try to fill the the space of this + // field start/end so we don't later try to fill the space of this // artificial member with (unnamed bitfield) padding. // FIXME: This check should verify that this is indeed an artificial member // we are supposed to ignore. @@ -3556,7 +3556,7 @@ // Make sure this is a declaration and not a concrete instance by looking // for DW_AT_declaration set to 1. Sometimes concrete function instances are // placed inside the class definitions and shouldn't be included in the list - // of things are are tracking here. + // of things we are tracking here. if (die.GetAttributeValueAsUnsigned(DW_AT_declaration, 0) != 1) return; diff --git a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp --- a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp @@ -638,7 +638,7 @@ // Now that all strings have been gathered, we will emit the string table. 
strtab.Encode(encoder); - // Followed the the symbol table data. + // Followed by the symbol table data. encoder.AppendData(index_encoder.GetData()); } diff --git a/lldb/source/Plugins/Trace/intel-pt/CommandObjectTraceStartIntelPT.cpp b/lldb/source/Plugins/Trace/intel-pt/CommandObjectTraceStartIntelPT.cpp --- a/lldb/source/Plugins/Trace/intel-pt/CommandObjectTraceStartIntelPT.cpp +++ b/lldb/source/Plugins/Trace/intel-pt/CommandObjectTraceStartIntelPT.cpp @@ -186,7 +186,7 @@ {"b", kBytesMultiplier}, {"", kBytesMultiplier}}; const auto non_digit_index = size_expression.find_first_not_of("0123456789"); - if (non_digit_index == 0) { // expression starts from from non-digit char. + if (non_digit_index == 0) { // expression starts from non-digit char. return llvm::None; } diff --git a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp --- a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp +++ b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleLoader.cpp @@ -282,7 +282,7 @@ "tscPerfZeroConversion"?: { // Values used to convert between TSCs and nanoseconds. See the time_zero // section in https://man7.org/linux/man-pages/man2/perf_event_open.2.html - // for for information. + // for information. "timeMult": integer, "timeShift": integer, diff --git a/lldb/source/Symbol/LineTable.cpp b/lldb/source/Symbol/LineTable.cpp --- a/lldb/source/Symbol/LineTable.cpp +++ b/lldb/source/Symbol/LineTable.cpp @@ -89,7 +89,7 @@ if (!entries.empty() && entries.back().file_addr == file_addr) { // GCC don't use the is_prologue_end flag to mark the first instruction // after the prologue. - // Instead of it it is issuing a line table entry for the first instruction + // Instead, it is issuing a line table entry for the first instruction // of the prologue and one for the first instruction after the prologue. 
If // the size of the prologue is 0 instruction then the 2 line entry will // have the same file address. Removing it will remove our ability to diff --git a/lldb/source/Symbol/Symtab.cpp b/lldb/source/Symbol/Symtab.cpp --- a/lldb/source/Symbol/Symtab.cpp +++ b/lldb/source/Symbol/Symtab.cpp @@ -1286,7 +1286,7 @@ // Now that all strings have been gathered, we will emit the string table. strtab.Encode(encoder); - // Followed the the symbol table data. + // Followed by the symbol table data. encoder.AppendData(symtab_encoder.GetData()); return true; } diff --git a/lldb/source/Target/RegisterContextUnwind.cpp b/lldb/source/Target/RegisterContextUnwind.cpp --- a/lldb/source/Target/RegisterContextUnwind.cpp +++ b/lldb/source/Target/RegisterContextUnwind.cpp @@ -698,7 +698,7 @@ // frames with the same // CFA (in theory we // can have arbitrary number of frames with the same CFA, but more then 2 is - // very very unlikely) + // very unlikely) RegisterContextUnwind::SharedPtr next_frame = GetNextFrame(); if (next_frame) { diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp --- a/lldb/source/Target/Target.cpp +++ b/lldb/source/Target/Target.cpp @@ -1775,7 +1775,7 @@ } else { // We have at least one section loaded. This can be because we have // manually loaded some sections with "target modules load ..." or - // because we have have a live process that has sections loaded through + // because we have a live process that has sections loaded through // the dynamic loader load_addr = fixed_addr.GetOffset(); // "fixed_addr" doesn't have a section, so @@ -2058,7 +2058,7 @@ } else { // We have at least one section loaded. This can be because we have // manually loaded some sections with "target modules load ..." 
or - // because we have have a live process that has sections loaded through + // because we have a live process that has sections loaded through // the dynamic loader section_load_list.ResolveLoadAddress(pointer_vm_addr, pointer_addr); } diff --git a/lldb/source/Utility/Args.cpp b/lldb/source/Utility/Args.cpp --- a/lldb/source/Utility/Args.cpp +++ b/lldb/source/Utility/Args.cpp @@ -26,7 +26,7 @@ // Inside double quotes, '\' and '"' are special. static const char *k_escapable_characters = "\"\\"; while (true) { - // Skip over over regular characters and append them. + // Skip over regular characters and append them. size_t regular = quoted.find_first_of(k_escapable_characters); result += quoted.substr(0, regular); quoted = quoted.substr(regular); @@ -94,7 +94,7 @@ bool arg_complete = false; do { - // Skip over over regular characters and append them. + // Skip over regular characters and append them. size_t regular = command.find_first_of(" \t\r\"'`\\"); arg += command.substr(0, regular); command = command.substr(regular); diff --git a/lldb/tools/debugserver/source/MacOSX/MachTask.mm b/lldb/tools/debugserver/source/MacOSX/MachTask.mm --- a/lldb/tools/debugserver/source/MacOSX/MachTask.mm +++ b/lldb/tools/debugserver/source/MacOSX/MachTask.mm @@ -670,7 +670,7 @@ err = RestoreExceptionPortInfo(); - // NULL our our exception port and let our exception thread exit + // NULL our exception port and let our exception thread exit mach_port_t exception_port = m_exception_port; m_exception_port = 0; diff --git a/lldb/tools/lldb-vscode/JSONUtils.cpp b/lldb/tools/lldb-vscode/JSONUtils.cpp --- a/lldb/tools/lldb-vscode/JSONUtils.cpp +++ b/lldb/tools/lldb-vscode/JSONUtils.cpp @@ -1042,7 +1042,7 @@ SetValueForKey(v, object, "value"); auto type_obj = v.GetType(); auto type_cstr = type_obj.GetDisplayTypeName(); - // If we have a type with many many children, we would like to be able to + // If we have a type with many children, we would like to be able to // give a hint to the IDE 
that the type has indexed children so that the // request can be broken up in grabbing only a few children at a time. We want // to be careful and only call "v.GetNumChildren()" if we have an array type diff --git a/lldb/tools/lldb-vscode/lldb-vscode.cpp b/lldb/tools/lldb-vscode/lldb-vscode.cpp --- a/lldb/tools/lldb-vscode/lldb-vscode.cpp +++ b/lldb/tools/lldb-vscode/lldb-vscode.cpp @@ -1736,7 +1736,7 @@ // selected target after these commands are run. g_vsc.target = g_vsc.debugger.GetSelectedTarget(); // Make sure the process is launched and stopped at the entry point before - // proceeding as the the launch commands are not run using the synchronous + // proceeding as the launch commands are not run using the synchronous // mode. error = g_vsc.WaitForProcessToStop(timeout_seconds); } diff --git a/lldb/unittests/Interpreter/TestCompletion.cpp b/lldb/unittests/Interpreter/TestCompletion.cpp --- a/lldb/unittests/Interpreter/TestCompletion.cpp +++ b/lldb/unittests/Interpreter/TestCompletion.cpp @@ -60,7 +60,7 @@ void SetUp() override { // chdir back into the original working dir this test binary started with. - // A previous test may have have changed the working dir. + // A previous test may have changed the working dir. ASSERT_NO_ERROR(fs::set_current_path(OriginalWorkingDir)); // Get the name of the current test. 
To prevent that by chance two tests diff --git a/lldb/unittests/Process/gdb-remote/PortMapTest.cpp b/lldb/unittests/Process/gdb-remote/PortMapTest.cpp --- a/lldb/unittests/Process/gdb-remote/PortMapTest.cpp +++ b/lldb/unittests/Process/gdb-remote/PortMapTest.cpp @@ -19,7 +19,7 @@ GDBRemoteCommunicationServerPlatform::PortMap p1; ASSERT_TRUE(p1.empty()); - // Empty means no restrictions, return 0 and and bind to get a port + // Empty means no restrictions, return 0 and bind to get a port llvm::Expected available_port = p1.GetNextAvailablePort(); ASSERT_THAT_EXPECTED(available_port, llvm::HasValue(0)); diff --git a/llvm/docs/PDB/DbiStream.rst b/llvm/docs/PDB/DbiStream.rst --- a/llvm/docs/PDB/DbiStream.rst +++ b/llvm/docs/PDB/DbiStream.rst @@ -210,7 +210,7 @@ uint16_t Dirty : 1; // ``true`` if EC information is present for this module. EC is presumed to // stand for "Edit & Continue", which LLVM does not support. So this flag - // will always be be false. + // will always be false. uint16_t EC : 1; uint16_t Unused : 6; // Type Server Index for this module. This is assumed to be related to /Zi, diff --git a/llvm/include/llvm/ADT/CombinationGenerator.h b/llvm/include/llvm/ADT/CombinationGenerator.h --- a/llvm/include/llvm/ADT/CombinationGenerator.h +++ b/llvm/include/llvm/ADT/CombinationGenerator.h @@ -73,7 +73,7 @@ SmallVector, variable_smallsize> VariablesState; - // 'increment' of the the whole VariablesState is defined identically to the + // 'increment' of the whole VariablesState is defined identically to the // increment of a number: starting from the least significant element, // increment it, and if it wrapped, then propagate that carry by also // incrementing next (more significant) element. 
diff --git a/llvm/include/llvm/ADT/FloatingPointMode.h b/llvm/include/llvm/ADT/FloatingPointMode.h --- a/llvm/include/llvm/ADT/FloatingPointMode.h +++ b/llvm/include/llvm/ADT/FloatingPointMode.h @@ -157,7 +157,7 @@ .Default(DenormalMode::Invalid); } -/// Return the name used for the denormal handling mode used by the the +/// Return the name used for the denormal handling mode used by the /// expected names from the denormal-fp-math attribute. inline StringRef denormalModeKindName(DenormalMode::DenormalModeKind Mode) { switch (Mode) { diff --git a/llvm/include/llvm/ADT/IntervalMap.h b/llvm/include/llvm/ADT/IntervalMap.h --- a/llvm/include/llvm/ADT/IntervalMap.h +++ b/llvm/include/llvm/ADT/IntervalMap.h @@ -1221,7 +1221,7 @@ unsigned size[Nodes]; IdxPair NewOffset(0, Position); - // Is is very common for the root node to be smaller than external nodes. + // It is very common for the root node to be smaller than external nodes. if (Nodes == 1) size[0] = rootSize; else @@ -1262,7 +1262,7 @@ unsigned Size[Nodes]; IdxPair NewOffset(0, Position); - // Is is very common for the root node to be smaller than external nodes. + // It is very common for the root node to be smaller than external nodes. if (Nodes == 1) Size[0] = rootSize; else @@ -1814,7 +1814,7 @@ // Insert into the branch node at Level-1. if (P.size(Level) == Branch::Capacity) { - // Branch node is full, handle handle the overflow. + // Branch node is full, handle the overflow. 
assert(!SplitRoot && "Cannot overflow after splitting the root"); SplitRoot = overflow(Level); Level += SplitRoot; diff --git a/llvm/include/llvm/ADT/STLExtras.h b/llvm/include/llvm/ADT/STLExtras.h --- a/llvm/include/llvm/ADT/STLExtras.h +++ b/llvm/include/llvm/ADT/STLExtras.h @@ -2221,7 +2221,7 @@ } // end namespace detail -/// Given an input range, returns a new range whose values are are pair (A,B) +/// Given an input range, returns a new range whose values are pair (A,B) /// such that A is the 0-based index of the item in the sequence, and B is /// the value from the original sequence. Example: /// diff --git a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h --- a/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h +++ b/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h @@ -1381,7 +1381,7 @@ if (ReachableBlocks.empty()) return; - // The map is used to to index successors/predecessors of reachable blocks in + // The map is used to index successors/predecessors of reachable blocks in // the ReachableBlocks vector DenseMap BlockIndex; // Extract initial frequencies for the reachable blocks diff --git a/llvm/include/llvm/Analysis/GuardUtils.h b/llvm/include/llvm/Analysis/GuardUtils.h --- a/llvm/include/llvm/Analysis/GuardUtils.h +++ b/llvm/include/llvm/Analysis/GuardUtils.h @@ -44,7 +44,7 @@ Value *&WidenableCondition, BasicBlock *&IfTrueBB, BasicBlock *&IfFalseBB); -/// Analgous to the above, but return the Uses so that that they can be +/// Analogous to the above, but return the Uses so that they can be /// modified. Unlike previous version, Condition is optional and may be null. 
bool parseWidenableBranch(User *U, Use *&Cond, Use *&WC, BasicBlock *&IfTrueBB, BasicBlock *&IfFalseBB); diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h --- a/llvm/include/llvm/Analysis/ScalarEvolution.h +++ b/llvm/include/llvm/Analysis/ScalarEvolution.h @@ -325,7 +325,7 @@ /// If Signed is a function that takes an n-bit tuple and maps to the /// integer domain as the tuples value interpreted as twos complement, /// and Unsigned a function that takes an n-bit tuple and maps to the - /// integer domain as as the base two value of input tuple, then a + b + /// integer domain as the base two value of input tuple, then a + b /// has IncrementNUSW iff: /// /// 0 <= Unsigned(a) + Signed(b) < 2^n diff --git a/llvm/include/llvm/AsmParser/Parser.h b/llvm/include/llvm/AsmParser/Parser.h --- a/llvm/include/llvm/AsmParser/Parser.h +++ b/llvm/include/llvm/AsmParser/Parser.h @@ -96,7 +96,7 @@ /// This function is a main interface to the LLVM Assembly Parser. It parses /// an ASCII file that (presumably) contains LLVM Assembly code for a module -/// summary. It returns a a ModuleSummaryIndex with the corresponding features. +/// summary. It returns a ModuleSummaryIndex with the corresponding features. /// Note that this does not verify that the generated Index is valid, so you /// should run the verifier after parsing the file to check that it is okay. /// Parse LLVM Assembly Index from a file diff --git a/llvm/include/llvm/BinaryFormat/MachO.h b/llvm/include/llvm/BinaryFormat/MachO.h --- a/llvm/include/llvm/BinaryFormat/MachO.h +++ b/llvm/include/llvm/BinaryFormat/MachO.h @@ -1071,7 +1071,7 @@ }; /// dyld_chained_starts_in_image is embedded in LC_DYLD_CHAINED_FIXUPS payload. -/// Each each seg_info_offset entry is the offset into this struct for that +/// Each seg_info_offset entry is the offset into this struct for that /// segment followed by pool of dyld_chain_starts_in_segment data. 
struct dyld_chained_starts_in_image { uint32_t seg_count; diff --git a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h --- a/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h +++ b/llvm/include/llvm/CodeGen/CodeGenPassBuilder.h @@ -290,7 +290,7 @@ /// all virtual registers. /// /// Note if the target overloads addRegAssignAndRewriteOptimized, this may not - /// be honored. This is also not generally used for the the fast variant, + /// be honored. This is also not generally used for the fast variant, /// where the allocation and rewriting are done in one pass. void addPreRewrite(AddMachinePass &) const {} diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h --- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h @@ -344,7 +344,7 @@ void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB, MachineIRBuilder &MIB); - /// Generate for for the BitTest header block, which precedes each sequence of + /// Generate for the BitTest header block, which precedes each sequence of /// BitTestCases. void emitBitTestHeader(SwitchCG::BitTestBlock &BTB, MachineBasicBlock *SwitchMBB); diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h --- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h @@ -242,7 +242,7 @@ /// needs to be widened to evenly cover \p DstReg, inserts high bits /// corresponding to the extension opcode \p PadStrategy. /// - /// \p VRegs will be cleared, and the the result \p NarrowTy register pieces + /// \p VRegs will be cleared, and the result \p NarrowTy register pieces /// will replace it. Returns The complete LCMTy that \p VRegs will cover when /// merged. 
LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy, diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h --- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h @@ -709,7 +709,7 @@ using namespace LegalityPredicates; return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1); } - /// The instruction is lowered when when type indexes 0, 1, and 2 are all in + /// The instruction is lowered when type indexes 0, 1, and 2 are all in /// their respective lists. LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list Types0, std::initializer_list Types1, @@ -857,7 +857,7 @@ std::initializer_list Types1) { return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1); } - /// The instruction is custom when when type indexes 0, 1, and 2 are all in + /// The instruction is custom when type indexes 0, 1, and 2 are all in /// their respective lists. LegalizeRuleSet & customForCartesianProduct(std::initializer_list Types0, diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h --- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -804,7 +804,7 @@ /// /// \pre setBasicBlock or setMI must have been called. /// \pre \p TablePtr must be a generic virtual register with pointer type. - /// \pre \p JTI must be be a jump table index. + /// \pre \p JTI must be a jump table index. /// \pre \p IndexReg must be a generic virtual register with pointer type. /// /// \return a MachineInstrBuilder for the newly created instruction. 
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h --- a/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/RegBankSelect.h @@ -190,7 +190,7 @@ /// Frequency of the insertion point. /// \p P is used to access the various analysis that will help to /// get that information, like MachineBlockFrequencyInfo. If \p P - /// does not contain enough enough to return the actual frequency, + /// does not contain enough to return the actual frequency, /// this returns 1. virtual uint64_t frequency(const Pass &P) const { return 1; } diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h --- a/llvm/include/llvm/CodeGen/ISDOpcodes.h +++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h @@ -906,7 +906,7 @@ FP_TO_BF16, /// Perform various unary floating-point operations inspired by libm. For - /// FPOWI, the result is undefined if if the integer operand doesn't fit into + /// FPOWI, the result is undefined if the integer operand doesn't fit into /// sizeof(int). 
FNEG, FABS, diff --git a/llvm/include/llvm/CodeGen/LiveInterval.h b/llvm/include/llvm/CodeGen/LiveInterval.h --- a/llvm/include/llvm/CodeGen/LiveInterval.h +++ b/llvm/include/llvm/CodeGen/LiveInterval.h @@ -857,7 +857,7 @@ /// V2: sub0 sub1 sub2 sub3 /// V1: sub0 sub1 /// - /// This offset will look like a composed subregidx in the the class: + /// This offset will look like a composed subregidx in the class: /// V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32> /// => V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32> /// diff --git a/llvm/include/llvm/CodeGen/LiveIntervals.h b/llvm/include/llvm/CodeGen/LiveIntervals.h --- a/llvm/include/llvm/CodeGen/LiveIntervals.h +++ b/llvm/include/llvm/CodeGen/LiveIntervals.h @@ -322,7 +322,7 @@ /// OrigRegs is a vector of registers that were originally used by the /// instructions in the range between the two iterators. /// - /// Currently, the only only changes that are supported are simple removal + /// Currently, the only changes that are supported are simple removal /// and addition of uses. void repairIntervalsInRange(MachineBasicBlock *MBB, MachineBasicBlock::iterator Begin, diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h --- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h +++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h @@ -220,7 +220,7 @@ /// Return a formatted string to identify this block and its parent function. std::string getFullName() const; - /// Test whether this block is used as as something other than the target + /// Test whether this block is used as something other than the target /// of a terminator, exception-handling target, or jump table. This is /// either the result of an IR-level "blockaddress", or some form /// of target-specific branch lowering. 
diff --git a/llvm/include/llvm/CodeGen/ScheduleDAG.h b/llvm/include/llvm/CodeGen/ScheduleDAG.h --- a/llvm/include/llvm/CodeGen/ScheduleDAG.h +++ b/llvm/include/llvm/CodeGen/ScheduleDAG.h @@ -757,7 +757,7 @@ /// be added from SUnit \p X to SUnit \p Y. void AddPredQueued(SUnit *Y, SUnit *X); - /// Updates the topological ordering to accommodate an an edge to be + /// Updates the topological ordering to accommodate an edge to be /// removed from the specified node \p N from the predecessors of the /// current node \p M. void RemovePred(SUnit *M, SUnit *N); diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -3348,7 +3348,7 @@ /// is[Z|FP]ExtFree of the related types is not true. virtual bool isExtFreeImpl(const Instruction *I) const { return false; } - /// Depth that GatherAllAliases should should continue looking for chain + /// Depth that GatherAllAliases should continue looking for chain /// dependencies when trying to find a more preferable chain. As an /// approximation, this should be more than the number of consecutive stores /// expected to be merged. @@ -3651,7 +3651,7 @@ /// \p AssumeSingleUse When this parameter is true, this function will /// attempt to simplify \p Op even if there are multiple uses. /// Callers are responsible for correctly updating the DAG based on the - /// results of this function, because simply replacing replacing TLO.Old + /// results of this function, because simply replacing TLO.Old /// with TLO.New will be incorrect when this parameter is true and TLO.Old /// has multiple uses. bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, @@ -3709,7 +3709,7 @@ /// \p AssumeSingleUse When this parameter is true, this function will /// attempt to simplify \p Op even if there are multiple uses. 
/// Callers are responsible for correctly updating the DAG based on the - /// results of this function, because simply replacing replacing TLO.Old + /// results of this function, because simply replacing TLO.Old /// with TLO.New will be incorrect when this parameter is true and TLO.Old /// has multiple uses. bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, diff --git a/llvm/include/llvm/CodeGen/TargetPassConfig.h b/llvm/include/llvm/CodeGen/TargetPassConfig.h --- a/llvm/include/llvm/CodeGen/TargetPassConfig.h +++ b/llvm/include/llvm/CodeGen/TargetPassConfig.h @@ -401,7 +401,7 @@ /// all virtual registers. /// /// Note if the target overloads addRegAssignAndRewriteOptimized, this may not - /// be honored. This is also not generally used for the the fast variant, + /// be honored. This is also not generally used for the fast variant, /// where the allocation and rewriting are done in one pass. virtual bool addPreRewrite() { return false; diff --git a/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h b/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h --- a/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h +++ b/llvm/include/llvm/DebugInfo/CodeView/TypeHashing.h @@ -70,7 +70,7 @@ /// TypeIndex that refers to B with a previously-computed global hash for B. As /// this is a recursive algorithm (e.g. the global hash of B also depends on the /// global hashes of the types that B refers to), a global hash can uniquely -/// identify identify that A occurs in another stream that has a completely +/// identify that A occurs in another stream that has a completely /// different graph structure. Although the hash itself is slower to compute, /// probing is much faster with a globally hashed type, because the hash itself /// is considered "as good as" the original type. 
Since type records can be diff --git a/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h b/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h --- a/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h +++ b/llvm/include/llvm/DebugInfo/GSYM/InlineInfo.h @@ -91,7 +91,7 @@ /// exists for \a Addr, then \a SrcLocs will be left untouched. If there is /// inline information for \a Addr, then \a SrcLocs will be modifiied to /// contain the deepest most inline function's SourceLocation at index zero - /// in the array and proceed up the the concrete function source file and + /// in the array and proceed up the concrete function source file and /// line at the end of the array. /// /// \param GR The GSYM reader that contains the string and file table that diff --git a/llvm/include/llvm/Demangle/Demangle.h b/llvm/include/llvm/Demangle/Demangle.h --- a/llvm/include/llvm/Demangle/Demangle.h +++ b/llvm/include/llvm/Demangle/Demangle.h @@ -105,7 +105,7 @@ char *getFunctionParameters(char *Buf, size_t *N) const; char *getFunctionReturnType(char *Buf, size_t *N) const; - /// If this function has any any cv or reference qualifiers. These imply that + /// If this function has any cv or reference qualifiers. These imply that /// the function is a non-static member function. bool hasFunctionQualifiers() const; diff --git a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h --- a/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h +++ b/llvm/include/llvm/ExecutionEngine/JITLink/aarch64.h @@ -132,7 +132,7 @@ /// Errors: /// - The result of the unshifted part of the fixup expression must be /// 32-bit aligned otherwise an alignment error will be returned. - /// - The result of the fixup expression must fit into an an int19 or an + /// - The result of the fixup expression must fit into an int19 or an /// out-of-range error will be returned. 
LDRLiteral19, diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h --- a/llvm/include/llvm/IR/AutoUpgrade.h +++ b/llvm/include/llvm/IR/AutoUpgrade.h @@ -52,7 +52,7 @@ /// so that it can update all calls to the old function. void UpgradeCallsToIntrinsic(Function* F); - /// This checks for global variables which should be upgraded. It it requires + /// This checks for global variables which should be upgraded. If it requires /// upgrading, returns a pointer to the upgraded variable. GlobalVariable *UpgradeGlobalVariable(GlobalVariable *GV); diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h --- a/llvm/include/llvm/IR/Instructions.h +++ b/llvm/include/llvm/IR/Instructions.h @@ -2344,7 +2344,7 @@ /// Return true if this shuffle mask is an insert subvector mask. /// A valid insert subvector mask inserts the lowest elements of a second - /// source operand into an in-place first source operand operand. + /// source operand into an in-place first source operand. /// Both the sub vector width and the insertion index is returned. static bool isInsertSubvectorMask(ArrayRef Mask, int NumSrcElts, int &NumSubElts, int &Index); diff --git a/llvm/include/llvm/IR/LegacyPassManagers.h b/llvm/include/llvm/IR/LegacyPassManagers.h --- a/llvm/include/llvm/IR/LegacyPassManagers.h +++ b/llvm/include/llvm/IR/LegacyPassManagers.h @@ -422,8 +422,8 @@ SmallVector PassVector; // Collection of Analysis provided by Parent pass manager and - // used by current pass manager. At at time there can not be more - // then PMT_Last active pass mangers. + // used by current pass manager. At any time there can not be more + // than PMT_Last active pass managers.
DenseMap *InheritedAnalysis[PMT_Last]; /// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions diff --git a/llvm/include/llvm/IR/PassManagerInternal.h b/llvm/include/llvm/IR/PassManagerInternal.h --- a/llvm/include/llvm/IR/PassManagerInternal.h +++ b/llvm/include/llvm/IR/PassManagerInternal.h @@ -52,7 +52,7 @@ /// Polymorphic method to access the name of a pass. virtual StringRef name() const = 0; - /// Polymorphic method to to let a pass optionally exempted from skipping by + /// Polymorphic method to let a pass optionally exempted from skipping by /// PassInstrumentation. /// To opt-in, pass should implement `static bool isRequired()`. It's no-op /// to have `isRequired` always return false since that is the default. diff --git a/llvm/include/llvm/MC/MCSectionWasm.h b/llvm/include/llvm/MC/MCSectionWasm.h --- a/llvm/include/llvm/MC/MCSectionWasm.h +++ b/llvm/include/llvm/MC/MCSectionWasm.h @@ -33,7 +33,7 @@ // itself and does not include the size of the section header. uint64_t SectionOffset = 0; - // For data sections, this is the index of of the corresponding wasm data + // For data sections, this is the index of the corresponding wasm data // segment uint32_t SegmentIndex = 0; diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h --- a/llvm/include/llvm/MC/MCStreamer.h +++ b/llvm/include/llvm/MC/MCStreamer.h @@ -266,7 +266,7 @@ virtual void emitRawTextImpl(StringRef String); - /// Returns true if the the .cv_loc directive is in the right section. + /// Returns true if the .cv_loc directive is in the right section. bool checkCVLocSection(unsigned FuncId, unsigned FileNo, SMLoc Loc); public: diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h --- a/llvm/include/llvm/Passes/PassBuilder.h +++ b/llvm/include/llvm/Passes/PassBuilder.h @@ -340,7 +340,7 @@ /// mpass1,fpass1,fpass2,mpass2,lpass1 /// /// This pipeline uses only one pass manager: the top-level module manager. 
- /// fpass1,fpass2 and lpass1 are added into the the top-level module manager + /// fpass1,fpass2 and lpass1 are added into the top-level module manager /// using only adaptor passes. No nested function/loop pass managers are /// added. The purpose is to allow easy pass testing when the user /// specifically want the pass to run under a adaptor directly. This is diff --git a/llvm/include/llvm/Support/DynamicLibrary.h b/llvm/include/llvm/Support/DynamicLibrary.h --- a/llvm/include/llvm/Support/DynamicLibrary.h +++ b/llvm/include/llvm/Support/DynamicLibrary.h @@ -103,7 +103,7 @@ /// This function closes the dynamic library at the given path, using the /// library close operation of the host operating system, and there is no - /// guarantee if or when this will cause the the library to be unloaded. + /// guarantee if or when this will cause the library to be unloaded. /// /// This function should be called only if the library was loaded using the /// getLibrary() function. diff --git a/llvm/include/llvm/Support/FileSystem.h b/llvm/include/llvm/Support/FileSystem.h --- a/llvm/include/llvm/Support/FileSystem.h +++ b/llvm/include/llvm/Support/FileSystem.h @@ -794,7 +794,7 @@ /// is false the current directory will be used instead. /// /// This function does not check if the file exists. If you want to be sure -/// that the file does not yet exist, you should use use enough '%' characters +/// that the file does not yet exist, you should use enough '%' characters /// in your model to ensure this. Each '%' gives 4-bits of entropy so you can /// use 32 of them to get 128 bits of entropy. /// diff --git a/llvm/include/llvm/Support/MemAlloc.h b/llvm/include/llvm/Support/MemAlloc.h --- a/llvm/include/llvm/Support/MemAlloc.h +++ b/llvm/include/llvm/Support/MemAlloc.h @@ -64,7 +64,7 @@ /// Allocate a buffer of memory with the given size and alignment. 
/// -/// When the compiler supports aligned operator new, this will use it to to +/// When the compiler supports aligned operator new, this will use it to /// handle even over-aligned allocations. /// /// However, this doesn't make any attempt to leverage the fancier techniques diff --git a/llvm/include/llvm/Support/SourceMgr.h b/llvm/include/llvm/Support/SourceMgr.h --- a/llvm/include/llvm/Support/SourceMgr.h +++ b/llvm/include/llvm/Support/SourceMgr.h @@ -59,7 +59,7 @@ /// dynamically based on the size of Buffer. mutable void *OffsetCache = nullptr; - /// Look up a given \p Ptr in in the buffer, determining which line it came + /// Look up a given \p Ptr in the buffer, determining which line it came /// from. unsigned getLineNumber(const char *Ptr) const; template diff --git a/llvm/include/llvm/Support/TrailingObjects.h b/llvm/include/llvm/Support/TrailingObjects.h --- a/llvm/include/llvm/Support/TrailingObjects.h +++ b/llvm/include/llvm/Support/TrailingObjects.h @@ -37,7 +37,7 @@ /// determine the size needed for allocation via /// 'additionalSizeToAlloc' and 'totalSizeToAlloc'. /// -/// All the methods implemented by this class are are intended for use +/// All the methods implemented by this class are intended for use /// by the implementation of the class, not as part of its interface /// (thus, private inheritance is suggested). /// diff --git a/llvm/include/llvm/Support/VirtualFileSystem.h b/llvm/include/llvm/Support/VirtualFileSystem.h --- a/llvm/include/llvm/Support/VirtualFileSystem.h +++ b/llvm/include/llvm/Support/VirtualFileSystem.h @@ -67,7 +67,7 @@ /// FIXME: Currently the external path is exposed by replacing the virtual /// path in this Status object. Instead, we should leave the path in the /// Status intact (matching the requested virtual path) - see - /// FileManager::getFileRef for how how we plan to fix this. + /// FileManager::getFileRef for how we plan to fix this. 
bool ExposesExternalVFSPath = false; Status() = default; @@ -863,7 +863,7 @@ LookupResult(Entry *E, sys::path::const_iterator Start, sys::path::const_iterator End); - /// If the found Entry maps the the input path to a path in the external + /// If the found Entry maps the input path to a path in the external /// file system (i.e. it is a FileEntry or DirectoryRemapEntry), returns /// that path. Optional getExternalRedirect() const { diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td --- a/llvm/include/llvm/Target/Target.td +++ b/llvm/include/llvm/Target/Target.td @@ -310,7 +310,7 @@ string DiagnosticType = ""; // A diagnostic message to emit when an invalid value is provided for this - // register class when it is being used an an assembly operand. If this is + // register class when it is being used as an assembly operand. If this is // non-empty, an anonymous diagnostic type enum value will be generated, and // the assembly matcher will provide a function to map from diagnostic types // to message strings. diff --git a/llvm/include/llvm/Transforms/Utils/MisExpect.h b/llvm/include/llvm/Transforms/Utils/MisExpect.h --- a/llvm/include/llvm/Transforms/Utils/MisExpect.h +++ b/llvm/include/llvm/Transforms/Utils/MisExpect.h @@ -28,7 +28,7 @@ /// checkBackendInstrumentation - compares PGO counters to the thresholds used /// for llvm.expect and warns if the PGO counters are outside of the expected /// range. It extracts the expected weights from the MD_prof weights attatched -/// to the instruction, which are are assumed to come from lowered llvm.expect +/// to the instruction, which are assumed to come from lowered llvm.expect /// intrinsics. 
The RealWeights parameter and the extracted expected weights are /// then passed to verifyMisexpect() for verification /// @@ -40,7 +40,7 @@ /// checkFrontendInstrumentation - compares PGO counters to the thresholds used /// for llvm.expect and warns if the PGO counters are outside of the expected /// range. It extracts the expected weights from the MD_prof weights attatched -/// to the instruction, which are are assumed to come from profiling data +/// to the instruction, which are assumed to come from profiling data /// attached by the frontend prior to llvm.expect intrinsic lowering. The /// ExpectedWeights parameter and the extracted real weights are then passed to /// verifyMisexpect() for verification @@ -64,7 +64,7 @@ /// checkExpectAnnotations - compares PGO counters to the thresholds used /// for llvm.expect and warns if the PGO counters are outside of the expected /// range. It extracts the expected weights from the MD_prof weights attatched -/// to the instruction, which are are assumed to come from lowered llvm.expect +/// to the instruction, which are assumed to come from lowered llvm.expect /// intrinsics. The RealWeights parameter and the extracted expected weights are /// then passed to verifyMisexpect() for verification. It is a thin wrapper /// around the checkFrontendInstrumentation and checkBackendInstrumentation APIs diff --git a/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/llvm/include/llvm/Transforms/Utils/ValueMapper.h --- a/llvm/include/llvm/Transforms/Utils/ValueMapper.h +++ b/llvm/include/llvm/Transforms/Utils/ValueMapper.h @@ -90,7 +90,7 @@ /// Instruct the remapper to reuse and mutate distinct metadata (remapping /// them in place) instead of cloning remapped copies. This flag has no - /// effect when when RF_NoModuleLevelChanges, since that implies an identity + /// effect when RF_NoModuleLevelChanges, since that implies an identity /// mapping. 
RF_ReuseAndMutateDistinctMDs = 4, diff --git a/llvm/lib/Analysis/CGSCCPassManager.cpp b/llvm/lib/Analysis/CGSCCPassManager.cpp --- a/llvm/lib/Analysis/CGSCCPassManager.cpp +++ b/llvm/lib/Analysis/CGSCCPassManager.cpp @@ -237,7 +237,7 @@ // rather one pass of the RefSCC creating one child RefSCC at a time. // Ensure we can proxy analysis updates from the CGSCC analysis manager - // into the the Function analysis manager by getting a proxy here. + // into the Function analysis manager by getting a proxy here. // This also needs to update the FunctionAnalysisManager, as this may be // the first time we see this SCC. CGAM.getResult(*C, CG).updateFAM( diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp --- a/llvm/lib/Analysis/IVDescriptors.cpp +++ b/llvm/lib/Analysis/IVDescriptors.cpp @@ -123,7 +123,7 @@ // meaning that we will use sext instructions instead of zext // instructions to restore the original type. IsSigned = true; - // Make sure at at least one sign bit is included in the result, so it + // Make sure at least one sign bit is included in the result, so it // will get properly sign-extended. ++MaxBitWidth; } diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -2988,7 +2988,7 @@ if (!BlockInsertion.second) return; - // Create a lexical block containing the variables and collect the the + // Create a lexical block containing the variables and collect the // lexical block information for the children. 
const InsnRange &Range = Ranges.front(); assert(Range.first && Range.second); diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -2558,7 +2558,7 @@ DIExpressionCursor ExprCursor(DIExpr); DwarfExpr.addFragmentOffset(DIExpr); - // If the DIExpr is is an Entry Value, we want to follow the same code path + // If the DIExpr is an Entry Value, we want to follow the same code path // regardless of whether the DBG_VALUE is variadic or not. if (DIExpr && DIExpr->isEntryValue()) { // Entry values can only be a single register with no additional DIExpr, diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -3136,7 +3136,7 @@ unsigned BinOpcode = MI.getOpcode(); - // We know know one of the operands is a select of constants. Now verify that + // We know that one of the operands is a select of constants. Now verify that // the other binary operator operand is either a constant, or we can handle a // variable. bool CanFoldNonConst = diff --git a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp --- a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp +++ b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp @@ -628,7 +628,7 @@ /// VectorInfo stores abstract the following information for each vector /// element: /// -/// 1) The the memory address loaded into the element as Polynomial +/// 1) The memory address loaded into the element as Polynomial /// 2) a set of load instruction necessary to construct the vector, /// 3) a set of all other instructions that are necessary to create the vector and /// 4) a pointer value that can be used as relative base for all elements. 
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp --- a/llvm/lib/CodeGen/MachinePipeliner.cpp +++ b/llvm/lib/CodeGen/MachinePipeliner.cpp @@ -2632,7 +2632,7 @@ /// v1 = phi(v2, v3) /// (Def) v3 = op v1 /// (MO) = v1 -/// If MO appears before Def, then then v1 and v3 may get assigned to the same +/// If MO appears before Def, then v1 and v3 may get assigned to the same /// register. bool SMSchedule::isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD, MachineInstr *Def, MachineOperand &MO) { diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp --- a/llvm/lib/CodeGen/MachineScheduler.cpp +++ b/llvm/lib/CodeGen/MachineScheduler.cpp @@ -3672,7 +3672,7 @@ if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB)) return ScheduledTrees->test(SchedTreeB); - // Trees with shallower connections have have lower priority. + // Trees with shallower connections have lower priority. if (DFSResult->getSubtreeLevel(SchedTreeA) != DFSResult->getSubtreeLevel(SchedTreeB)) { return DFSResult->getSubtreeLevel(SchedTreeA) diff --git a/llvm/lib/CodeGen/RDFLiveness.cpp b/llvm/lib/CodeGen/RDFLiveness.cpp --- a/llvm/lib/CodeGen/RDFLiveness.cpp +++ b/llvm/lib/CodeGen/RDFLiveness.cpp @@ -171,7 +171,7 @@ SmallSet Defs; // Remove all non-phi defs that are not aliased to RefRR, and separate - // the the remaining defs into buckets for containing blocks. + // the remaining defs into buckets for containing blocks. std::map> Owners; std::map> Blocks; for (NodeId N : DefQ) { diff --git a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp --- a/llvm/lib/CodeGen/ReachingDefAnalysis.cpp +++ b/llvm/lib/CodeGen/ReachingDefAnalysis.cpp @@ -182,7 +182,7 @@ MBBReachingDefs[MBBNumber][Unit].insert(Start, Def); } - // Update reaching def at end of of BB. Keep in mind that these are + // Update reaching def at end of BB. 
Keep in mind that these are // adjusted relative to the end of the basic block. if (MBBOutRegsInfos[MBBNumber][Unit] < Def - NumInsts) MBBOutRegsInfos[MBBNumber][Unit] = Def - NumInsts; diff --git a/llvm/lib/CodeGen/RegAllocPBQP.cpp b/llvm/lib/CodeGen/RegAllocPBQP.cpp --- a/llvm/lib/CodeGen/RegAllocPBQP.cpp +++ b/llvm/lib/CodeGen/RegAllocPBQP.cpp @@ -192,7 +192,7 @@ void apply(PBQPRAGraph &G) override { LiveIntervals &LIS = G.getMetadata().LIS; - // A minimum spill costs, so that register constraints can can be set + // A minimum spill cost, so that register constraints can be set // without normalization in the [0.0:MinSpillCost( interval. const PBQP::PBQPNum MinSpillCost = 10.0; diff --git a/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp b/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp --- a/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp +++ b/llvm/lib/CodeGen/RemoveRedundantDebugValues.cpp @@ -105,7 +105,7 @@ MachineOperand &Loc = MI.getDebugOperand(0); if (!Loc.isReg()) { - // If it it's not a register, just stop tracking such variable. + // If it's not a register, just stop tracking such a variable. if (VMI != VariableMap.end()) VariableMap.erase(VMI); continue; diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -5162,7 +5162,7 @@ SelectionDAG &DAG) { // We are looking for UMIN(FPTOUI(X), (2^n)-1), which may have come via a // select/vselect/select_cc. The two operands pairs for the select (N2/N3) may - // be truncated versions of the the setcc (N0/N1). + // be truncated versions of the setcc (N0/N1). if ((N0 != N2 && (N2.getOpcode() != ISD::TRUNCATE || N0 != N2.getOperand(0))) || N0.getOpcode() != ISD::FP_TO_UINT || CC != ISD::SETULT) @@ -25213,7 +25213,7 @@ } case ISD::CopyFromReg: - // Always forward past past CopyFromReg. + // Always forward past CopyFromReg. 
C = C.getOperand(0); return true; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -4815,7 +4815,7 @@ // Check the obvious case. if (A == B) return true; - // For for negative and positive zero. + // For negative and positive zero. if (const ConstantFPSDNode *CA = dyn_cast(A)) if (const ConstantFPSDNode *CB = dyn_cast(B)) if (CA->isZero() && CB->isZero()) return true; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -2081,7 +2081,7 @@ bool IgnoreChains) { if (OptLevel == CodeGenOpt::None) return false; - // If Root use can somehow reach N through a path that that doesn't contain + // If Root use can somehow reach N through a path that doesn't contain // U then folding N would create a cycle. e.g. In the following // diagram, Root can reach N through X. If N is folded into Root, then // X is both a predecessor and a successor of U. diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp --- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -556,7 +556,7 @@ #endif // Figure out what lowering strategy we're going to use for each part - // Note: Is is conservatively correct to lower both "live-in" and "live-out" + // Note: It is conservatively correct to lower both "live-in" and "live-out" // as "live-through". A "live-through" variable is one which is "live-in", // "live-out", and live throughout the lifetime of the call (i.e. we can find // it from any PC within the transitive callee of the statepoint). 
In @@ -1283,7 +1283,7 @@ // All the reloads are independent and are reading memory only modified by // statepoints (i.e. no other aliasing stores); informing SelectionDAG of - // this this let's CSE kick in for free and allows reordering of + // this lets CSE kick in for free and allows reordering of // instructions if possible. The lowering for statepoint sets the root, // so this is ordering all reloads with the either // a) the statepoint node itself, or diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -5888,7 +5888,7 @@ // Multiply the numerator (operand 0) by the magic value. // FIXME: We should support doing a MUL in a wider type. auto GetMULHS = [&](SDValue X, SDValue Y) { - // If the type isn't legal, use a wider mul of the the type calculated + // If the type isn't legal, use a wider mul of the type calculated // earlier. if (!isTypeLegal(VT)) { X = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, X); @@ -6046,7 +6046,7 @@ // FIXME: We should support doing a MUL in a wider type. auto GetMULHU = [&](SDValue X, SDValue Y) { - // If the type isn't legal, use a wider mul of the the type calculated + // If the type isn't legal, use a wider mul of the type calculated // earlier. if (!isTypeLegal(VT)) { X = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, X); diff --git a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp --- a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp +++ b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp @@ -302,7 +302,7 @@ LineEntry LE(RowAddress, FileIdx, Row.Line); if (RowIndex != RowVector[0] && Row.Address < PrevRow.Address) { // We have seen full duplicate line tables for functions in some - // DWARF files. Watch for those here by checking the the last + // DWARF files. 
Watch for those here by checking the last // row was the function's end address (HighPC) and that the // current line table entry's address is the same as the first // line entry we already have in our "function_info.Lines". If diff --git a/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp b/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp --- a/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp +++ b/llvm/lib/DebugInfo/LogicalView/Readers/LVELFReader.cpp @@ -1058,7 +1058,7 @@ CurrentSymbol->addLocationConstant(Attr, *FormValue.getAsUnsignedConstant(), OffsetOnEntry); else - // This is a a location description, or a reference to one. + // This is a location description, or a reference to one. processLocationList(Attr, FormValue, Die, OffsetOnEntry); } diff --git a/llvm/lib/Debuginfod/HTTPServer.cpp b/llvm/lib/Debuginfod/HTTPServer.cpp --- a/llvm/lib/Debuginfod/HTTPServer.cpp +++ b/llvm/lib/Debuginfod/HTTPServer.cpp @@ -45,7 +45,7 @@ Request.setResponse({404u, "text/plain", "Could not memory-map file.\n"}); return false; } - // Lambdas are copied on conversion to to std::function, preventing use of + // Lambdas are copied on conversion to std::function, preventing use of // smart pointers. 
MemoryBuffer *MB = MBOrErr->release(); Request.setResponse({200u, "application/octet-stream", MB->getBufferSize(), diff --git a/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp --- a/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp +++ b/llvm/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp @@ -447,7 +447,7 @@ rec.CodeAddr = CodeAddr; rec.NrEntry = Lines.size(); - // compute total size size of record (variable due to filenames) + // compute total size of record (variable due to filenames) DILineInfoTable::iterator Begin = Lines.begin(); DILineInfoTable::iterator End = Lines.end(); for (DILineInfoTable::iterator It = Begin; It != End; ++It) { diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h --- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h @@ -301,7 +301,7 @@ // won't be interleaved between modules. It is also used in mapSectionAddress // and resolveRelocations to protect write access to internal data structures. // - // loadObject may be called on the same thread during the handling of of + // loadObject may be called on the same thread during the handling of // processRelocations, and that's OK. The handling of the relocation lists // is written in such a way as to work correctly if new elements are added to // the end of the list while the list is being processed. 
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -5686,7 +5686,7 @@ "vector_extract index must be a constant multiple of " "the result type's known minimum vector length."); - // If this extraction is not the 'mixed' case where a fixed vector is is + // If this extraction is not the 'mixed' case where a fixed vector is // extracted from a scalable vector, ensure that the extraction does not // overrun the parent vector. if (VecEC.isScalable() == ResultEC.isScalable()) { diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp --- a/llvm/lib/MC/WasmObjectWriter.cpp +++ b/llvm/lib/MC/WasmObjectWriter.cpp @@ -550,7 +550,7 @@ TargetObjectWriter->getRelocType(Target, Fixup, FixupSection, IsLocRel); // Absolute offset within a section or a function. - // Currently only supported for for metadata sections. + // Currently only supported for metadata sections. // See: test/MC/WebAssembly/blockaddress.ll if ((Type == wasm::R_WASM_FUNCTION_OFFSET_I32 || Type == wasm::R_WASM_FUNCTION_OFFSET_I64 || diff --git a/llvm/lib/MC/XCOFFObjectWriter.cpp b/llvm/lib/MC/XCOFFObjectWriter.cpp --- a/llvm/lib/MC/XCOFFObjectWriter.cpp +++ b/llvm/lib/MC/XCOFFObjectWriter.cpp @@ -1185,7 +1185,7 @@ } // The size of the tail padding in a section is the end virtual address of - // the current section minus the the end virtual address of the last csect + // the current section minus the end virtual address of the last csect // in that section. if (uint64_t PaddingSize = CsectEntry.Address + CsectEntry.Size - CurrentAddressLocation) { diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp --- a/llvm/lib/Object/ArchiveWriter.cpp +++ b/llvm/lib/Object/ArchiveWriter.cpp @@ -500,7 +500,7 @@ // UniqueTimestamps is a special case to improve debugging on Darwin: // // The Darwin linker does not link debug info into the final - // binary. 
Instead, it emits entries of type N_OSO in in the output + // binary. Instead, it emits entries of type N_OSO in the output // binary's symbol table, containing references to the linked-in // object files. Using that reference, the debugger can read the // debug data directly from the object files. Alternatively, an diff --git a/llvm/lib/Support/Format.cpp b/llvm/lib/Support/Format.cpp --- a/llvm/lib/Support/Format.cpp +++ b/llvm/lib/Support/Format.cpp @@ -294,7 +294,7 @@ } // parse specifier; verify that the character is a valid specifier given - // restrictions imposed by by the use of flags and precision values + // restrictions imposed by the use of flags and precision values char Next = *Fmt; if (Next == 0) return; diff --git a/llvm/lib/Support/Host.cpp b/llvm/lib/Support/Host.cpp --- a/llvm/lib/Support/Host.cpp +++ b/llvm/lib/Support/Host.cpp @@ -1794,7 +1794,7 @@ // EAX=0x7, ECX=0x0 indicates the availability of the instruction (via the 18th // bit of EDX), while the EAX=0x1b leaf returns information on the // availability of specific pconfig leafs. - // The target feature here only refers to the the first of these two. + // The target feature here only refers to the first of these two. // Users might need to check for the availability of specific pconfig // leaves using cpuid, since that information is ignored while // detecting features using the "-march=native" flag. 
diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp --- a/llvm/lib/Support/RISCVISAInfo.cpp +++ b/llvm/lib/Support/RISCVISAInfo.cpp @@ -390,7 +390,7 @@ return createStringError(errc::invalid_argument, Error); } - // If experimental extension, require use of current version number number + // If experimental extension, require use of current version number if (auto ExperimentalExtension = isExperimentalExtension(Ext)) { if (!EnableExperimentalExtension) { std::string Error = "requires '-menable-experimental-extensions' for " diff --git a/llvm/lib/Support/SourceMgr.cpp b/llvm/lib/Support/SourceMgr.cpp --- a/llvm/lib/Support/SourceMgr.cpp +++ b/llvm/lib/Support/SourceMgr.cpp @@ -113,7 +113,7 @@ return llvm::lower_bound(Offsets, PtrOffset) - Offsets.begin() + 1; } -/// Look up a given \p Ptr in in the buffer, determining which line it came +/// Look up a given \p Ptr in the buffer, determining which line it came /// from. unsigned SourceMgr::SrcBuffer::getLineNumber(const char *Ptr) const { size_t Sz = Buffer->getBufferSize(); diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10729,7 +10729,7 @@ }; // For OP_MOVLANE shuffles, the RHSID represents the lane to move into. We - // get the lane to move from from the PFID, which is always from the + // get the lane to move from the PFID, which is always from the // original vectors (V1 or V2). SDValue OpLHS = GeneratePerfectShuffle( LHSID, V1, V2, PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); @@ -15112,7 +15112,7 @@ return SDValue(); // Multiplication of a power of two plus/minus one can be done more - // cheaply as as shift+add/sub. For now, this is true unilaterally. If + // cheaply as shift+add/sub. For now, this is true unilaterally. 
If // future CPUs have a cheaper MADD instruction, this may need to be // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and // 64-bit is 5 cycles, so this is always a win. diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h @@ -108,7 +108,7 @@ /// Returns the base register operator of a load/store. static const MachineOperand &getLdStBaseOp(const MachineInstr &MI); - /// Returns the the immediate offset operator of a load/store. + /// Returns the immediate offset operator of a load/store. static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI); /// Returns whether the instruction is FP or NEON. diff --git a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp --- a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp +++ b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp @@ -60,7 +60,7 @@ /// Lower a HOM_Prolog pseudo instruction into a helper call /// or a sequence of homogeneous stores. - /// When a a fp setup follows, it can be optimized. + /// When an fp setup follows, it can be optimized. bool lowerProlog(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineBasicBlock::iterator &NextMBBI); /// Lower a HOM_Epilog pseudo instruction into a helper call diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -2383,7 +2383,7 @@ // FIXME: BigEndian requires an additional REV instruction to satisfy the // constraint that none of the bits change when stored to memory as one - // type, and and reloaded as another type. + // type, and reloaded as another type. 
let Predicates = [IsLE] in { def : Pat<(nxv16i8 (bitconvert (nxv8i16 ZPR:$src))), (nxv16i8 ZPR:$src)>; def : Pat<(nxv16i8 (bitconvert (nxv4i32 ZPR:$src))), (nxv16i8 ZPR:$src)>; diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -972,7 +972,7 @@ // MO_TAGGED on the page indicates a tagged address. Set the tag now. We do so // by creating a MOVK that sets bits 48-63 of the register to (global address // + 0x100000000 - PC) >> 48. The additional 0x100000000 offset here is to - // prevent an incorrect tag being generated during relocation when the the + // prevent an incorrect tag being generated during relocation when the // global appears before the code section. Without the offset, a global at // `0x0f00'0000'0000'1000` (i.e. at `0x1000` with tag `0xf`) that's referenced // by code at `0x2000` would result in `0x0f00'0000'0000'1000 - 0x2000 = diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp @@ -138,7 +138,7 @@ APInt ConstValue = Const->Value.sext(Ty.getSizeInBits()); // The following code is ported from AArch64ISelLowering. // Multiplication of a power of two plus/minus one can be done more - // cheaply as as shift+add/sub. For now, this is true unilaterally. If + // cheaply as shift+add/sub. For now, this is true unilaterally. If // future CPUs have a cheaper MADD instruction, this may need to be // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and // 64-bit is 5 cycles, so this is always a win. 
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp @@ -228,7 +228,7 @@ // Try simplify G_UADDO with 8 or 16 bit operands to wide G_ADD and TBNZ if // result is only used in the no-overflow case. It is restricted to cases // where we know that the high-bits of the operands are 0. If there's an - // overflow, then the the 9th or 17th bit must be set, which can be checked + // overflow, then the 9th or 17th bit must be set, which can be checked // using TBNZ. // // Change (for UADDOs on 8 and 16 bits): diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -5035,7 +5035,7 @@ // s16 -> <2 x s16>, and <3 x s16> -> <4 x s16>, LLT RoundedTy; - // S32 vector to to cover all data, plus TFE result element. + // S32 vector to cover all data, plus TFE result element. LLT TFETy; // Register type to use for each loaded component. Will be S32 or V2S16. diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -10375,7 +10375,7 @@ // If it's free to do so, push canonicalizes further up the source, which may // find a canonical source. // - // TODO: More opcodes. Note this is unsafe for the the _ieee minnum/maxnum for + // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for // sNaNs. 
if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { auto *CRHS = dyn_cast(N0.getOperand(1)); diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -723,7 +723,7 @@ } /// \returns true if this is an s_store_dword* instruction. This is more - /// specific than than isSMEM && mayStore. + /// specific than isSMEM && mayStore. static bool isScalarStore(const MachineInstr &MI) { return MI.getDesc().TSFlags & SIInstrFlags::SCALAR_STORE; } diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h --- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h +++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h @@ -358,7 +358,7 @@ // as the input registers. Register ScratchRSrcReg = AMDGPU::PRIVATE_RSRC_REG; - // This is the the unswizzled offset from the current dispatch's scratch wave + // This is the unswizzled offset from the current dispatch's scratch wave // base to the beginning of the current function's frame. Register FrameOffsetReg = AMDGPU::FP_REG; @@ -462,7 +462,7 @@ // VGPR used for SGPR spills Register VGPR; - // If the VGPR is is used for SGPR spills in a non-entrypoint function, the + // If the VGPR is used for SGPR spills in a non-entrypoint function, the // stack slot used to save/restore it in the prolog/epilog. Optional FI; diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp --- a/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp +++ b/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp @@ -36,7 +36,7 @@ /// the instructions in bb.then will only overwrite lanes that will never be /// accessed in bb.else. 
/// -/// This pass aims to to tell register allocator that %a is in-fact dead, +/// This pass aims to tell register allocator that %a is in-fact dead, /// through inserting a phi-node in bb.flow saying that %a is undef when coming /// from bb.then, and then replace the uses in the bb.else with the result of /// newly inserted phi. diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp --- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -6198,7 +6198,7 @@ // We have to check if the instruction is MRRC2 // or MCRR2 when constructing the operands for // Inst. Reason is because MRRC2 stores to two - // registers so it's tablegen desc has has two + // registers so it's tablegen desc has two // outputs whereas MCRR doesn't store to any // registers so all of it's operands are listed // as inputs, therefore the operand order for diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h b/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h --- a/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h +++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRFixupKinds.h @@ -68,10 +68,10 @@ /// with the upper 8 bits of a negated 16-bit value (bits 8-15). fixup_hi8_ldi_neg, /// Replaces the immediate operand of a 16-bit `Rd, K` instruction - /// with the upper 8 bits of a negated negated 24-bit value (bits 16-23). + /// with the upper 8 bits of a negated 24-bit value (bits 16-23). fixup_hh8_ldi_neg, /// Replaces the immediate operand of a 16-bit `Rd, K` instruction - /// with the upper 8 bits of a negated negated 32-bit value (bits 24-31). + /// with the upper 8 bits of a negated 32-bit value (bits 24-31). 
fixup_ms8_ldi_neg, /// Replaces the immediate operand of a 16-bit `Rd, K` instruction diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp @@ -651,7 +651,7 @@ bool HexagonShuffler::shuffle() { if (size() > HEXAGON_PACKET_SIZE) { - // Ignore a packet with with more than what a packet can hold + // Ignore a packet with more than what a packet can hold // or with compound or duplex insns for now. reportError("invalid instruction packet"); return false; diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.h b/llvm/lib/Target/M68k/M68kInstrInfo.h --- a/llvm/lib/Target/M68k/M68kInstrInfo.h +++ b/llvm/lib/Target/M68k/M68kInstrInfo.h @@ -322,7 +322,7 @@ bool ExpandMOVEM(MachineInstrBuilder &MIB, const MCInstrDesc &Desc, bool IsRM) const; - /// Return a virtual register initialized with the the global base register + /// Return a virtual register initialized with the global base register /// value. Output instructions required to initialize the register in the /// function entry block, if necessary. unsigned getGlobalBaseReg(MachineFunction *MF) const; diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.cpp b/llvm/lib/Target/M68k/M68kInstrInfo.cpp --- a/llvm/lib/Target/M68k/M68kInstrInfo.cpp +++ b/llvm/lib/Target/M68k/M68kInstrInfo.cpp @@ -770,7 +770,7 @@ M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DstReg), FrameIndex); } -/// Return a virtual register initialized with the the global base register +/// Return a virtual register initialized with the global base register /// value. Output instructions required to initialize the register in the /// function entry block, if necessary. 
/// diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -3022,7 +3022,7 @@ (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>; // Directly initializing underlying the b32 register is one less SASS - // instruction than than vector-packing move. + // instruction than vector-packing move. def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src), "mov.b32 \t$dst, $src;", []>; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -5236,7 +5236,7 @@ // inserted into the DAG as part of call lowering. The restore of the TOC // pointer is modeled by using a pseudo instruction for the call opcode that // represents the 2 instruction sequence of an indirect branch and link, - // immediately followed by a load of the TOC pointer from the the stack save + // immediately followed by a load of the TOC pointer from the stack save // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC // as it is not saved or used. RetOpc = isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h --- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h +++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h @@ -61,7 +61,7 @@ const TargetRegisterInfo *TRI) const override; // Get the first stack adjustment amount for SplitSPAdjust. - // Return 0 if we don't want to to split the SP adjustment in prologue and + // Return 0 if we don't want to split the SP adjustment in prologue and // epilogue. 
uint64_t getFirstSPAdjustAmount(const MachineFunction &MF) const; diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp --- a/llvm/lib/Target/VE/VEISelLowering.cpp +++ b/llvm/lib/Target/VE/VEISelLowering.cpp @@ -1131,7 +1131,7 @@ case AtomicOrdering::AcquireRelease: case AtomicOrdering::SequentiallyConsistent: // Generate "fencem 3" as acq_rel and seq_cst fence. - // FIXME: "fencem 3" doesn't wait for for PCIe deveices accesses, + // FIXME: "fencem 3" doesn't wait for PCIe devices accesses, // so seq_cst may require more instruction for them. return SDValue(DAG.getMachineNode(VE::FENCEM, DL, MVT::Other, DAG.getTargetConstant(3, DL, MVT::i32), diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp @@ -362,7 +362,7 @@ // to TargetOptions and MCAsmInfo. But when clang compiles bitcode directly, // clang's LangOptions is not used and thus the exception model info is not // correctly transferred to TargetOptions and MCAsmInfo, so we make sure we - // have the correct exception model in in WebAssemblyMCAsmInfo constructor. + // have the correct exception model in WebAssemblyMCAsmInfo constructor. // But in this case TargetOptions is still not updated, so we make sure they // are the same. TM->Options.ExceptionModel = TM->getMCAsmInfo()->getExceptionHandlingType(); diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp --- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -420,7 +420,7 @@ // - If it's not the fragment where the previous instruction is, // returns true.
// - If it's the fragment holding the previous instruction but its - // size changed since the the previous instruction was emitted into + // size changed since the previous instruction was emitted into // it, returns true. // - Otherwise returns false. // - If the fragment is not a DataFragment, returns false. @@ -571,7 +571,7 @@ if (!needAlign(Inst) || !PendingBA) return; - // Tie the aligned instructions into a a pending BoundaryAlign. + // Tie the aligned instructions into a pending BoundaryAlign. PendingBA->setLastFragment(CF); PendingBA = nullptr; diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -1030,7 +1030,7 @@ break; } case ISD::VSELECT: { - // Replace VSELECT with non-mask conditions with with BLENDV. + // Replace VSELECT with non-mask conditions with BLENDV. if (N->getOperand(0).getValueType().getVectorElementType() == MVT::i1) break; @@ -3701,7 +3701,7 @@ } if (Subtarget->hasBMI2()) { - // Great, just emit the the BZHI.. + // Great, just emit the BZHI.. if (NVT != MVT::i32) { // But have to place the bit count into the wide-enough register first. NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -6898,7 +6898,7 @@ unsigned ShiftLeft = NumElems - SubVecNumElems; unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal; - // Do an optimization for the the most frequently used types. + // Do an optimization for the most frequently used types. 
if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) { APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems); Mask0.flipAllBits(); diff --git a/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp b/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp --- a/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp +++ b/llvm/lib/Target/X86/X86IndirectBranchTracking.cpp @@ -182,7 +182,7 @@ break; } else if (I->isEHLabel()) { // Old Landingpad BB (is not Landingpad now) with - // the the old "callee" EHLabel. + // the old "callee" EHLabel. MCSymbol *Sym = I->getOperand(0).getMCSymbol(); if (!MF.hasCallSiteLandingPad(Sym)) continue; diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -1447,7 +1447,7 @@ if (MinSize == 2 && Subtarget->is32Bit() && Subtarget->isTargetWindowsMSVC() && (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) { - // For compatibility reasons, when targetting MSVC, is is important to + // For compatibility reasons, when targeting MSVC, it is important to // generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools // rely specifically on this pattern to be able to patch a function. // This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE. diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp --- a/llvm/lib/Transforms/IPO/IROutliner.cpp +++ b/llvm/lib/Transforms/IPO/IROutliner.cpp @@ -559,7 +559,7 @@ // Iterate over the operands in an instruction. If the global value number, // assigned by the IRSimilarityCandidate, has been seen before, we check if - // the the number has been found to be not the same value in each instance. + // the number has been found to be not the same value in each instance.
for (Value *V : ID.OperVals) { Optional GVNOpt = C.getGVN(V); assert(GVNOpt && "Expected a GVN for operand?"); @@ -766,7 +766,7 @@ } } -/// Find the the constants that will need to be lifted into arguments +/// Find the constants that will need to be lifted into arguments /// as they are not the same in each instance of the region. /// /// \param [in] C - The IRSimilarityCandidate containing the region we are diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp --- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp +++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp @@ -1088,7 +1088,7 @@ replaceCfiUses(F, FDecl, isJumpTableCanonical); // Set visibility late because it's used in replaceCfiUses() to determine - // whether uses need to to be replaced. + // whether uses need to be replaced. F->setVisibility(Visibility); } diff --git a/llvm/lib/Transforms/IPO/PartialInlining.cpp b/llvm/lib/Transforms/IPO/PartialInlining.cpp --- a/llvm/lib/Transforms/IPO/PartialInlining.cpp +++ b/llvm/lib/Transforms/IPO/PartialInlining.cpp @@ -163,7 +163,7 @@ // The dominating block of the region to be outlined. BasicBlock *NonReturnBlock = nullptr; - // The set of blocks in Entries that that are predecessors to ReturnBlock + // The set of blocks in Entries that are predecessors to ReturnBlock SmallVector ReturnBlockPreds; }; diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -1872,7 +1872,7 @@ // on the profile to favor more inlining. This is only a problem with CS // profile. // 3. Transitive indirect call edges due to inlining. When a callee function - // (say B) is inlined into into a caller function (say A) in LTO prelink, + // (say B) is inlined into a caller function (say A) in LTO prelink, // every call edge originated from the callee B will be transferred to // the caller A. 
If any transferred edge (say A->C) is indirect, the // original profiled indirect edge B->C, even if considered, would not diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -2552,7 +2552,7 @@ auto *DstTy = dyn_cast(ReturnType); auto *VecTy = dyn_cast(Vec->getType()); - // Only canonicalize if the the destination vector and Vec are fixed + // Only canonicalize if the destination vector and Vec are fixed // vectors. if (DstTy && VecTy) { unsigned DstNumElts = DstTy->getNumElements(); diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -3880,7 +3880,7 @@ Value *StartV = StartU->get(); BasicBlock *StartBB = PN->getIncomingBlock(*StartU); bool StartNeedsFreeze = !isGuaranteedNotToBeUndefOrPoison(StartV); - // We can't insert freeze if the the start value is the result of the + // We can't insert freeze if the start value is the result of the // terminator (e.g. an invoke). if (StartNeedsFreeze && StartBB->getTerminator() == StartV) return nullptr; diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -537,7 +537,7 @@ /// getShadowTy([n x T]) = [n x getShadowTy(T)] /// getShadowTy(other type) = i16 Type *getShadowTy(Type *OrigTy); - /// Returns the shadow type of of V's type. + /// Returns the shadow type of V's type. 
Type *getShadowTy(Value *V); const uint64_t NumOfElementsInArgOrgTLS = ArgTLSSize / OriginWidthBytes; diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp --- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp +++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp @@ -1101,7 +1101,7 @@ // `End`, decrementing by one every time. // // * if `Smallest` sign-overflows we know `End` is `INT_SMAX`. Since the - // induction variable is decreasing we know that that the smallest value + // induction variable is decreasing we know that the smallest value // the loop body is actually executed with is `INT_SMIN` == `Smallest`. // // * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`. In diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp --- a/llvm/lib/Transforms/Scalar/LICM.cpp +++ b/llvm/lib/Transforms/Scalar/LICM.cpp @@ -992,7 +992,7 @@ // loop invariant). If so make them unconditional by moving them to their // immediate dominator. We iterate through the instructions in reverse order // which ensures that when we rehoist an instruction we rehoist its operands, - // and also keep track of where in the block we are rehoisting to to make sure + // and also keep track of where in the block we are rehoisting to make sure // that we rehoist instructions before the instructions that use them. Instruction *HoistPoint = nullptr; if (ControlFlowHoisting) { diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp --- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp +++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp @@ -1381,7 +1381,7 @@ } // Walk through all uses in FC1. For each use, find the reaching def. If the - // def is located in FC0 then it is is not safe to fuse. + // def is located in FC0 then it is not safe to fuse. 
for (BasicBlock *BB : FC1.L->blocks()) for (Instruction &I : *BB) for (auto &Op : I.operands()) @@ -1461,7 +1461,7 @@ /// 2. The successors of the guard have the same flow into/around the loop. /// If the compare instructions are identical, then the first successor of the /// guard must go to the same place (either the preheader of the loop or the - /// NonLoopBlock). In other words, the the first successor of both loops must + /// NonLoopBlock). In other words, the first successor of both loops must /// both go into the loop (i.e., the preheader) or go around the loop (i.e., /// the NonLoopBlock). The same must be true for the second successor. bool haveIdenticalGuards(const FusionCandidate &FC0, @@ -1594,7 +1594,7 @@ // first, or undef otherwise. This is sound as exiting the first implies the // second will exit too, __without__ taking the back-edge. [Their // trip-counts are equal after all. - // KB: Would this sequence be simpler to just just make FC0.ExitingBlock go + // KB: Would this sequence be simpler to just make FC0.ExitingBlock go // to FC1.Header? I think this is basically what the three sequences are // trying to accomplish; however, doing this directly in the CFG may mean // the DT/PDT becomes invalid diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp --- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp +++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp @@ -280,7 +280,7 @@ Instruction *findInsertPt(Instruction *User, ArrayRef Ops); /// Same as above, *except* that this uses the SCEV definition of invariant /// which is that an expression *can be made* invariant via SCEVExpander. - /// Thus, this version is only suitable for finding an insert point to be be + /// Thus, this version is only suitable for finding an insert point to be /// passed to SCEVExpander! 
Instruction *findInsertPt(const SCEVExpander &Expander, Instruction *User, ArrayRef Ops); diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp --- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp +++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp @@ -1937,7 +1937,7 @@ /// Returns true if \p V is a matrix value in the given subprogram. bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); } - /// If \p V is a matrix value, print its shape as as NumRows x NumColumns to + /// If \p V is a matrix value, print its shape as NumRows x NumColumns to /// \p SS. void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) { auto M = Inst2Matrix.find(V); diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp --- a/llvm/lib/Transforms/Scalar/NewGVN.cpp +++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp @@ -3533,7 +3533,7 @@ // the second. We only want it to be less than if the DFS orders are equal. // // Each LLVM instruction only produces one value, and thus the lowest-level - // differentiator that really matters for the stack (and what we use as as a + // differentiator that really matters for the stack (and what we use as a // replacement) is the local dfs number. // Everything else in the structure is instruction level, and only affects // the order in which we will replace operands of a given instruction. diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp --- a/llvm/lib/Transforms/Utils/LoopPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp @@ -496,7 +496,7 @@ /// F/(F+E) is a probability to go to loop and E/(F+E) is a probability to /// go to exit. /// Then, Estimated ExitCount = F / E. -/// For I-th (counting from 0) peeled off iteration we set the the weights for +/// For I-th (counting from 0) peeled off iteration we set the weights for /// the peeled exit as (EC - I, 1). 
It gives us reasonable distribution, /// The probability to go to exit 1/(EC-I) increases. At the same time /// the estimated exit count in the remainder loop reduces by I. diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp --- a/llvm/lib/Transforms/Utils/LoopUtils.cpp +++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp @@ -1390,7 +1390,7 @@ // Note that we must not perform expansions until after // we query *all* the costs, because if we perform temporary expansion // inbetween, one that we might not intend to keep, said expansion - // *may* affect cost calculation of the the next SCEV's we'll query, + // *may* affect cost calculation of the next SCEV's we'll query, // and next SCEV may errneously get smaller cost. // Collect all the candidate PHINodes to be rewritten. diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp --- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -1349,7 +1349,7 @@ if (isOnlyUsedInEqualityComparison(CI, SrcStr)) // S is dereferenceable so it's safe to load from it and fold // memchr(S, C, N) == S to N && *S == C for any C and N. - // TODO: This is safe even even for nonconstant S. + // TODO: This is safe even for nonconstant S. return memChrToCharCompare(CI, Size, B, DL); // From now on we need a constant length and constant array. diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -7025,7 +7025,7 @@ // With the exception of GEPs and PHIs, after scalarization there should // only be one copy of the instruction generated in the loop. This is // because the VF is either 1, or any instructions that need scalarizing - // have already been dealt with by the the time we get here. 
As a result, + // have already been dealt with by the time we get here. As a result, // it means we don't have to multiply the instruction cost by VF. assert(I->getOpcode() == Instruction::GetElementPtr || I->getOpcode() == Instruction::PHI || @@ -8337,7 +8337,7 @@ if (ShouldUseVectorIntrinsic) return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()), ID); - // Is better to call a vectorized version of the function than to to scalarize + // Is better to call a vectorized version of the function than to scalarize // the call? auto ShouldUseVectorCall = LoopVectorizationPlanner::getDecisionAndClampRange( [&](ElementCount VF) -> bool { diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -523,7 +523,7 @@ return S.OpValue; } -/// \returns true if \p Opcode is allowed as part of of the main/alternate +/// \returns true if \p Opcode is allowed as part of the main/alternate /// instruction for SLP vectorization. /// /// Example of unsupported opcode is SDIV that can potentially cause UB if the @@ -2018,7 +2018,7 @@ for (int Pass = 0; Pass != 2; ++Pass) { // Check if no need to reorder operands since they're are perfect or // shuffled diamond match. - // Need to to do it to avoid extra external use cost counting for + // Need to do it to avoid extra external use cost counting for // shuffled matches, which may cause regressions. if (SkipReordering()) break; @@ -2328,7 +2328,7 @@ /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the /// stores in \p StoresVec can form a vector instruction. If so it returns true - /// and populates \p ReorderIndices with the shuffle indices of the the stores + /// and populates \p ReorderIndices with the shuffle indices of the stores /// when compared to the sorted vector. 
bool canFormVector(const SmallVector &StoresVec, OrdersType &ReorderIndices) const; @@ -5786,7 +5786,7 @@ ReuseShuffleIndicies); TE->setOperandsInOrder(); for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) { - // For scalar operands no need to to create an entry since no need to + // For scalar operands no need to create an entry since no need to // vectorize it. if (isVectorIntrinsicWithScalarOpAtArg(ID, i)) continue; diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -656,7 +656,7 @@ }; /// VPRecipeBase is a base class modeling a sequence of one or more output IR -/// instructions. VPRecipeBase owns the the VPValues it defines through VPDef +/// instructions. VPRecipeBase owns the VPValues it defines through VPDef /// and is responsible for deleting its defined values. Single-value /// VPRecipeBases that also inherit from VPValue must make sure to inherit from /// VPRecipeBase before VPValue. diff --git a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp --- a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp @@ -323,7 +323,7 @@ // 2. Process outermost loop exit. We created an empty VPBB for the loop // single exit BB during the RPO traversal of the loop body but Instructions - // weren't visited because it's not part of the the loop. + // weren't visited because it's not part of the loop. 
BasicBlock *LoopExitBB = TheLoop->getUniqueExitBlock(); assert(LoopExitBB && "Loops with multiple exits are not supported."); VPBasicBlock *LoopExitVPBB = BB2VPBB[LoopExitBB]; diff --git a/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll b/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll --- a/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll +++ b/llvm/test/DebugInfo/X86/basic-block-sections-debug-loclist-2.ll @@ -17,7 +17,7 @@ ; extern bool b; ; extern int x; ; void test() { -; // i's value is 7 for the first call in in the if block. With basic +; // i's value is 7 for the first call in the if block. With basic ; // block sections, this would split the range across sections and would ; // result in an extra entry than without sections. ; int i = 7; diff --git a/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp b/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp --- a/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp +++ b/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp @@ -179,7 +179,7 @@ /// Determine the virtual address that is considered the base address of an ELF /// object file. /// -/// The base address of an ELF file is the the "p_vaddr" of the first program +/// The base address of an ELF file is the "p_vaddr" of the first program /// header whose "p_type" is PT_LOAD. /// /// \param ELFFile An ELF object file we will search. diff --git a/llvm/tools/llvm-profgen/CSPreInliner.cpp b/llvm/tools/llvm-profgen/CSPreInliner.cpp --- a/llvm/tools/llvm-profgen/CSPreInliner.cpp +++ b/llvm/tools/llvm-profgen/CSPreInliner.cpp @@ -172,7 +172,7 @@ (NormalizationUpperBound - NormalizationLowerBound); if (NormalizedHotness > 1.0) NormalizedHotness = 1.0; - // Add 1 to to ensure hot callsites get a non-zero threshold, which could + // Add 1 to ensure hot callsites get a non-zero threshold, which could // happen when SampleColdCallSiteThreshold is 0. This is when we do not // want any inlining for cold callsites. 
SampleThreshold = SampleHotCallSiteThreshold * NormalizedHotness * 100 + diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp --- a/llvm/tools/llvm-readobj/ELFDumper.cpp +++ b/llvm/tools/llvm-readobj/ELFDumper.cpp @@ -4752,7 +4752,7 @@ return; std::vector ChainLen(NBucket, 0); - // Go over all buckets and and note chain lengths of each bucket (total + // Go over all buckets and note chain lengths of each bucket (total // unique chain lengths). for (size_t B = 0; B < NBucket; B++) { BitVector Visited(NChain); diff --git a/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp b/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp --- a/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp +++ b/llvm/tools/llvm-reduce/deltas/ReduceOperandsSkip.cpp @@ -149,7 +149,7 @@ // Regardless whether referenced, add the function arguments as // replacement possibility with the goal of reducing the number of (used) - // function arguments, possibly created by the the operands-to-args. + // function arguments, possibly created by the operands-to-args. for (Argument &Arg : F.args()) ReferencedVals.insert(&Arg); @@ -179,7 +179,7 @@ std::reverse(Candidates.begin(), Candidates.end()); // Independency of collectReferencedValues's idea of reductive power, - // ensure the the partial order of IsMoreReduced is enforced. + // ensure the partial order of IsMoreReduced is enforced. llvm::stable_sort(Candidates, IsMoreReduced); Callback(Op, Candidates); diff --git a/llvm/tools/obj2yaml/elf2yaml.cpp b/llvm/tools/obj2yaml/elf2yaml.cpp --- a/llvm/tools/obj2yaml/elf2yaml.cpp +++ b/llvm/tools/obj2yaml/elf2yaml.cpp @@ -300,7 +300,7 @@ } // Normally an object that does not have sections has e_shnum == 0. - // Also, e_shnum might be 0, when the the number of entries in the section + // Also, e_shnum might be 0, when the number of entries in the section // header table is larger than or equal to SHN_LORESERVE (0xff00). 
In this // case the real number of entries is held in the sh_size member of the // initial entry. We have a section header table when `e_shoff` is not 0. diff --git a/llvm/unittests/CodeGen/InstrRefLDVTest.cpp b/llvm/unittests/CodeGen/InstrRefLDVTest.cpp --- a/llvm/unittests/CodeGen/InstrRefLDVTest.cpp +++ b/llvm/unittests/CodeGen/InstrRefLDVTest.cpp @@ -3154,7 +3154,7 @@ VLocs[1].Vars.clear(); // Test that we can eliminate PHIs. A PHI will be placed at the loop head - // because there's a def in in. + // because there's a def in it. MInLocs[1][0] = LiveInRsp; MOutLocs[1][0] = LiveInRsp; VLocs[0].Vars.insert({Var, DbgValue(LiveInRspID, EmptyProps)}); diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp --- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp +++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp @@ -159,7 +159,7 @@ EXPECT_EQ(V2P1, V2P0.changeElementType(P1)); EXPECT_EQ(V2S32, V2P0.changeElementType(S32)); - // Similar tests for for scalable vectors. + // Similar tests for scalable vectors. const LLT NXV2S32 = LLT::scalable_vector(2, 32); const LLT NXV2S64 = LLT::scalable_vector(2, 64); diff --git a/llvm/utils/TableGen/CodeGenRegisters.h b/llvm/utils/TableGen/CodeGenRegisters.h --- a/llvm/utils/TableGen/CodeGenRegisters.h +++ b/llvm/utils/TableGen/CodeGenRegisters.h @@ -361,7 +361,7 @@ llvm_unreachable("VTNum greater than number of ValueTypes in RegClass!"); } - // Return true if this this class contains the register. + // Return true if this class contains the register. bool contains(const CodeGenRegister*) const; // Returns true if RC is a subclass. 
diff --git a/llvm/utils/vscode/llvm/language-configuration-tablegen.json b/llvm/utils/vscode/llvm/language-configuration-tablegen.json --- a/llvm/utils/vscode/llvm/language-configuration-tablegen.json +++ b/llvm/utils/vscode/llvm/language-configuration-tablegen.json @@ -19,7 +19,7 @@ ["\"", "\""], ["'", "'"] ], - // symbols that that can be used to surround a selection + // symbols that can be used to surround a selection "surroundingPairs": [ ["{", "}"], ["[", "]"], diff --git a/mlir/include/mlir-c/AffineMap.h b/mlir/include/mlir-c/AffineMap.h --- a/mlir/include/mlir-c/AffineMap.h +++ b/mlir/include/mlir-c/AffineMap.h @@ -100,7 +100,7 @@ /// context. The permutation expression is a non-empty vector of integers. /// The elements of the permutation vector must be continuous from 0 and cannot /// be repeated (i.e. `[1,2,0]` is a valid permutation. `[2,0]` or `[1,1,2]` is -/// an invalid invalid permutation.) The affine map is owned by the context. +/// an invalid permutation.) The affine map is owned by the context. MLIR_CAPI_EXPORTED MlirAffineMap mlirAffineMapPermutationGet( MlirContext ctx, intptr_t size, unsigned *permutation); diff --git a/mlir/include/mlir/Analysis/DataLayoutAnalysis.h b/mlir/include/mlir/Analysis/DataLayoutAnalysis.h --- a/mlir/include/mlir/Analysis/DataLayoutAnalysis.h +++ b/mlir/include/mlir/Analysis/DataLayoutAnalysis.h @@ -26,7 +26,7 @@ /// Constructs the data layouts. explicit DataLayoutAnalysis(Operation *root); - /// Returns the data layout active active at the given operation, that is the + /// Returns the data layout active at the given operation, that is the /// data layout specified by the closest ancestor that can specify one, or the /// default layout if there is no such ancestor. 
const DataLayout &getAbove(Operation *operation) const; diff --git a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h --- a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h +++ b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h @@ -679,7 +679,7 @@ /// false otherwise. bool hasInvalidConstraint() const; - /// Returns the constant lower bound bound if isLower is true, and the upper + /// Returns the constant lower bound if isLower is true, and the upper /// bound if isLower is false. template Optional computeConstantLowerOrUpperBound(unsigned pos); diff --git a/mlir/include/mlir/Analysis/SliceAnalysis.h b/mlir/include/mlir/Analysis/SliceAnalysis.h --- a/mlir/include/mlir/Analysis/SliceAnalysis.h +++ b/mlir/include/mlir/Analysis/SliceAnalysis.h @@ -42,7 +42,7 @@ /// /// Upon return to the root call, `forwardSlice` is filled with a /// postorder list of uses (i.e. a reverse topological order). To get a proper -/// topological order, we just just reverse the order in `forwardSlice` before +/// topological order, we just reverse the order in `forwardSlice` before /// returning. /// /// Example starting from node 0 diff --git a/mlir/include/mlir/Conversion/LLVMCommon/LoweringOptions.h b/mlir/include/mlir/Conversion/LLVMCommon/LoweringOptions.h --- a/mlir/include/mlir/Conversion/LLVMCommon/LoweringOptions.h +++ b/mlir/include/mlir/Conversion/LLVMCommon/LoweringOptions.h @@ -35,7 +35,7 @@ bool useBarePtrCallConv = false; enum class AllocLowering { - /// Use malloc for for heap allocations. + /// Use malloc for heap allocations. Malloc, /// Use aligned_alloc for heap allocations. 
diff --git a/mlir/include/mlir/Conversion/LLVMCommon/TypeConverter.h b/mlir/include/mlir/Conversion/LLVMCommon/TypeConverter.h --- a/mlir/include/mlir/Conversion/LLVMCommon/TypeConverter.h +++ b/mlir/include/mlir/Conversion/LLVMCommon/TypeConverter.h @@ -169,7 +169,7 @@ /// Convert a memref type into a list of LLVM IR types that will form the /// memref descriptor. If `unpackAggregates` is true the `sizes` and `strides` /// arrays in the descriptors are unpacked to individual index-typed elements, - /// else they are are kept as rank-sized arrays of index type. In particular, + /// else they are kept as rank-sized arrays of index type. In particular, /// the list will contain: /// - two pointers to the memref element type, followed by /// - an index-typed offset, followed by diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h --- a/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h +++ b/mlir/include/mlir/Dialect/Affine/Analysis/LoopAnalysis.h @@ -69,7 +69,7 @@ NestedPattern &vectorTransferMatcher); /// Checks whether the loop is structurally vectorizable and that all the LoadOp -/// and StoreOp matched have access indexing functions that are are either: +/// and StoreOp matched have access indexing functions that are either: /// 1. invariant along the loop induction variable created by 'loop'; /// 2. varying along at most one memory dimension. If such a unique dimension /// is found, it is written into `memRefDim`. diff --git a/mlir/include/mlir/Dialect/Affine/Utils.h b/mlir/include/mlir/Dialect/Affine/Utils.h --- a/mlir/include/mlir/Dialect/Affine/Utils.h +++ b/mlir/include/mlir/Dialect/Affine/Utils.h @@ -297,7 +297,7 @@ ValueRange dimValues, ValueRange symbolValues); /// Create a sequence of operations that implement the `affineMap` applied to -/// the given `operands` (as it it were an AffineApplyOp). +/// the given `operands` (as if it were an AffineApplyOp). 
Optional> expandAffineMap(OpBuilder &builder, Location loc, AffineMap affineMap, diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgBase.td @@ -46,7 +46,7 @@ let hasOperationAttrVerify = 1; let hasConstantMaterializer = 1; let extraClassDeclaration = [{ - /// Attribute name used to to memoize indexing maps for named ops. + /// Attribute name used to memoize indexing maps for named ops. constexpr const static ::llvm::StringLiteral kMemoizedIndexingMapsAttrName = "linalg.memoized_indexing_maps"; diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h @@ -28,7 +28,7 @@ class LinalgOp; namespace detail { -/// Implementation of the method that that check if given operands +/// Implementation of the method that check if given operands /// can be dropped, i.e. the remaining operands can compute the loop /// bounds of the op. bool canOpOperandsBeDroppedImpl(linalg::LinalgOp linalgOp, diff --git a/mlir/include/mlir/Dialect/Quant/UniformSupport.h b/mlir/include/mlir/Dialect/Quant/UniformSupport.h --- a/mlir/include/mlir/Dialect/Quant/UniformSupport.h +++ b/mlir/include/mlir/Dialect/Quant/UniformSupport.h @@ -171,7 +171,7 @@ /// An utility class to quantize an attribute by the per-axis quantization /// parameters. The size of the quantization dim in the converted elements -/// attribute should matche the size of of scales/zeroPoints vectors in the +/// attribute should match the size of scales/zeroPoints vectors in the /// quantization parameters. 
class UniformQuantizedPerAxisValueConverter { public: diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h --- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h +++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h @@ -258,7 +258,7 @@ return getDimLevelType(tensor(b), index(b)); } - /// Gets the dimension number of the the `t`th tensor on `i`th loop. + /// Gets the dimension number of the `t`th tensor on `i`th loop. Optional getDimNum(unsigned t, unsigned i) const { assert(t < numTensors && i < numLoops); return loopIdxToDim[t][i]; diff --git a/mlir/include/mlir/Dialect/Tensor/Transforms/TransformUtils.h b/mlir/include/mlir/Dialect/Tensor/Transforms/TransformUtils.h --- a/mlir/include/mlir/Dialect/Tensor/Transforms/TransformUtils.h +++ b/mlir/include/mlir/Dialect/Tensor/Transforms/TransformUtils.h @@ -209,7 +209,7 @@ /// either take the place of the source, allowing for a new, simpler /// `collapse_shape` op to replace `op`, or the `collapse_shape` op will be /// completely replaced by the `extract_slice` result. Either way, `op` is -/// replaced and new new op is returned. +/// replaced and the new op is returned. /// /// ### Example: /// ``` diff --git a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h --- a/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Tensor/Transforms/Transforms.h @@ -30,7 +30,7 @@ OpBuilder &builder, tensor::ExtractSliceOp sliceOp, OpResult producerOp); /// Collects patterns to merge consecutive tensor.insert_slice/extract_slice -/// into one. These patterns are in in this separate entry point because the +/// into one. These patterns are in this separate entry point because the /// bufferization is sensitive over IR structure, particularly those /// tensor.extract_slice and tensor.insert_slice ops for creating the slices. 
void populateMergeConsecutiveInsertExtractSlicePatterns( diff --git a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td --- a/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td +++ b/mlir/include/mlir/Dialect/Transform/IR/TransformOps.td @@ -77,7 +77,7 @@ ^bb0(%arg0: !pdl.operation): // Try a fallible transformation. %0 = transform.fallible %arg0 // ... - // If succeeded, yield the the result of the transformation. + // If succeeded, yield the result of the transformation. transform.yield %0 : !pdl.operation }, { ^bb0(%arg0: !pdl.operation): diff --git a/mlir/include/mlir/IR/AttrTypeBase.td b/mlir/include/mlir/IR/AttrTypeBase.td --- a/mlir/include/mlir/IR/AttrTypeBase.td +++ b/mlir/include/mlir/IR/AttrTypeBase.td @@ -297,7 +297,7 @@ string cppType = type; // The C++ type of the accessor for this parameter. string cppAccessorType = !if(!empty(accessorType), type, accessorType); - // The C++ storage type of of this parameter if it is a reference, e.g. + // The C++ storage type of this parameter if it is a reference, e.g. // `std::string` for `StringRef` or `SmallVector` for `ArrayRef`. string cppStorageType = cppType; // The C++ code to convert from the storage type to the parameter type. diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h --- a/mlir/include/mlir/IR/BuiltinTypes.h +++ b/mlir/include/mlir/IR/BuiltinTypes.h @@ -441,7 +441,7 @@ /// canonical "contiguous" strides AffineExpr. Strides are multiplicative and /// once a dynamic dimension is encountered, all canonical strides become /// dynamic and need to be encoded with a different symbol. -/// For canonical strides expressions, the offset is always 0 and and fastest +/// For canonical strides expressions, the offset is always 0 and the fastest /// varying stride is always `1`. 
/// /// Examples: diff --git a/mlir/lib/Analysis/Presburger/PWMAFunction.cpp b/mlir/lib/Analysis/Presburger/PWMAFunction.cpp --- a/mlir/lib/Analysis/Presburger/PWMAFunction.cpp +++ b/mlir/lib/Analysis/Presburger/PWMAFunction.cpp @@ -244,7 +244,7 @@ // defined. // // `dom` here is guranteed to be disjoint from already added pieces - // because because the pieces added before are either: + // because the pieces added before are either: // - Subsets of the domain of other MAFs in `this`, which are guranteed // to be disjoint from `dom`, or // - They are one of the pieces added for `pieceB`, and we have been diff --git a/mlir/lib/Analysis/Presburger/Simplex.cpp b/mlir/lib/Analysis/Presburger/Simplex.cpp --- a/mlir/lib/Analysis/Presburger/Simplex.cpp +++ b/mlir/lib/Analysis/Presburger/Simplex.cpp @@ -188,7 +188,7 @@ /// greater, so A*y + b is always equal to or lexicographically greater than b. /// Thus, since we can attain x = b, that is the lexicographic minimum. /// -/// We have that that every column in A is lexicopositive, i.e., has at least +/// We have that every column in A is lexicopositive, i.e., has at least /// one non-zero element, with the first such element being positive. 
Since for /// the tableau to be consistent we must have non-negative sample values not /// only for the constraints but also for the variables, we also have x >= 0 and diff --git a/mlir/lib/Analysis/Presburger/Utils.cpp b/mlir/lib/Analysis/Presburger/Utils.cpp --- a/mlir/lib/Analysis/Presburger/Utils.cpp +++ b/mlir/lib/Analysis/Presburger/Utils.cpp @@ -82,7 +82,7 @@ /// 4q - i - j + 2 >= 0 <-- Lower bound for 'q' /// -4q + i + j >= 0 <-- Tight upper bound for 'q' /// -/// To extract floor divisions with tighter bounds, we assume that that the +/// To extract floor divisions with tighter bounds, we assume that the /// constraints are of the form: /// c <= expr - divisior * var <= divisor - 1, where 0 <= c <= divisor - 1 /// Rearranging, we have: diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp @@ -137,7 +137,7 @@ /// positional value. DenseMap valueToPosition; - /// The set of operation values whose whose location will be used for newly + /// The set of operation values whose location will be used for newly /// generated operations. SetVector locOps; }; diff --git a/mlir/lib/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.cpp b/mlir/lib/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.cpp --- a/mlir/lib/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.cpp +++ b/mlir/lib/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.cpp @@ -27,7 +27,7 @@ /// For example, the DAGs `A -> B -> C -> B -> A` and `A -> B -> C -> A` /// represent a noop within the IR, and thus the initial input values can be /// propagated. -/// The same does not hold for 'open' chains chains of casts, such as +/// The same does not hold for 'open' chains of casts, such as /// `A -> B -> C`. 
In this last case there is no cycle among the types and thus /// the conversion is incomplete. The same hold for 'closed' chains like /// `A -> B -> A`, but with the result of type `B` being used by some non-cast diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -1501,7 +1501,7 @@ xBorder = rewriter.create( loc, rewriter.getI32IntegerAttr(border[1])); - // Compute the the integer index and partial offset. + // Compute the integer index and partial offset. Value ix, iy, dx, dy; // x = x * scale_d + offset; // ix = floor(x / scale_n) diff --git a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp --- a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp @@ -617,8 +617,8 @@ // Return 'NoDependence' if loopDepth > numCommonLoops and if the ancestor // operation of 'srcAccess' does not properly dominate the ancestor // operation of 'dstAccess' in the same common operation block. - // Note: this check is skipped if 'allowRAR' is true, because because RAR - // deps can exist irrespective of lexicographic ordering b/w src and dst. + // Note: this check is skipped if 'allowRAR' is true, because RAR deps + // can exist irrespective of lexicographic ordering b/w src and dst. unsigned numCommonLoops = getNumCommonLoops(srcDomain, dstDomain); assert(loopDepth <= numCommonLoops + 1); if (!allowRAR && loopDepth > numCommonLoops && diff --git a/mlir/lib/Dialect/Affine/Utils/Utils.cpp b/mlir/lib/Dialect/Affine/Utils/Utils.cpp --- a/mlir/lib/Dialect/Affine/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Affine/Utils/Utils.cpp @@ -220,7 +220,7 @@ } /// Create a sequence of operations that implement the `affineMap` applied to -/// the given `operands` (as it it were an AffineApplyOp). 
+/// the given `operands` (as if it were an AffineApplyOp). Optional> mlir::expandAffineMap(OpBuilder &builder, Location loc, AffineMap affineMap, diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp @@ -398,7 +398,7 @@ /// /// Op dominance can often be used to rule out potential conflicts such as /// "read" happens before "write". E.g., the following IR is not a RaW conflict -/// because the the read happens *before* the write. +/// because the read happens *before* the write. /// /// %0 = ... : tensor /// "reading_op"(%0) : tensor diff --git a/mlir/lib/Dialect/Func/Transforms/FuncConversions.cpp b/mlir/lib/Dialect/Func/Transforms/FuncConversions.cpp --- a/mlir/lib/Dialect/Func/Transforms/FuncConversions.cpp +++ b/mlir/lib/Dialect/Func/Transforms/FuncConversions.cpp @@ -141,7 +141,7 @@ TypeConverter &converter, bool returnOpAlwaysLegal) { // If this is a `return` and the user pass wants to convert/transform across - // function boundaries, then `converter` is invoked to check whether the the + // function boundaries, then `converter` is invoked to check whether the // `return` op is legal. if (isa(op) && !returnOpAlwaysLegal) return converter.isLegal(op); diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgDialect.cpp @@ -64,7 +64,7 @@ // LinalgDialect //===----------------------------------------------------------------------===// -/// Attribute name used to to memoize indexing maps for named ops. +/// Attribute name used to memoize indexing maps for named ops. 
constexpr const ::llvm::StringLiteral LinalgDialect::kMemoizedIndexingMapsAttrName; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Split.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Split.cpp @@ -40,7 +40,7 @@ sizesCopy[dimension] = size; offsetsCopy[dimension] = offset; - // Create the part as it it were a single tile. + // Create the part as if it were a single tile. SmallVector tiled = op.getTiledImplementation(b, offsetsCopy, sizesCopy); assert(tiled.size() == 1 && "expected a single result from tiling"); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -1016,7 +1016,7 @@ /// sizes may turn out to be equal at runtime. bool hasSameTensorSize(Value beforePadding, tensor::ExtractSliceOp afterTrimming) const { - // If the input to tensor::PadOp is a CastOp, try with with both CastOp + // If the input to tensor::PadOp is a CastOp, try with both CastOp // result and CastOp operand. if (auto castOp = beforePadding.getDefiningOp()) if (hasSameTensorSize(castOp.getSource(), afterTrimming)) diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -537,7 +537,7 @@ } /// Determines whether MemRef_CastOp casts to a more dynamic version of the -/// source memref. This is useful to to fold a memref.cast into a consuming op +/// source memref. This is useful to fold a memref.cast into a consuming op /// and implement canonicalization patterns for ops in different dialects that /// may consume the results of memref.cast operations. 
Such foldable memref.cast /// operations are typically inserted as `view` and `subview` ops are diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -2178,7 +2178,7 @@ if (sourceType != insertSliceOp.getSourceType()) { OpBuilder::InsertionGuard g(rewriter); // The only difference between InsertSliceOp and ParallelInsertSliceOp - // is the the insertion point is just before the ParallelCombiningOp in + // is that the insertion point is just before the ParallelCombiningOp in // the parallel case. if (std::is_same::value) rewriter.setInsertionPoint(insertSliceOp->getParentOp()); @@ -2316,7 +2316,7 @@ // Insert the cast. OpBuilder::InsertionGuard g(rewriter); // The only difference between InsertSliceOp and ParallelInsertSliceOp is - // the the insertion point is just before the ParallelCombiningOp in the + // that the insertion point is just before the ParallelCombiningOp in the // parallel case. if (std::is_same::value) rewriter.setInsertionPoint(insertSliceOp->getParentOp()); diff --git a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp --- a/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/SplitPaddingPatterns.cpp @@ -24,7 +24,7 @@ using namespace mlir; -/// Returns true if the the given `attrOrValue` is a constant zero. +/// Returns true if the given `attrOrValue` is a constant zero. 
static bool isZero(OpFoldResult attrOrValue) { if (Optional val = getConstantIntValue(attrOrValue)) return *val == 0; diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -1388,7 +1388,7 @@ if (destinationRank > 0) { auto destinationType = extractOp.getResult().getType().cast(); for (int64_t i = 0; i < destinationRank; i++) { - // The lowest dimension of of the destination must match the lowest + // The lowest dimension of the destination must match the lowest // dimension of the shapecast op source. // TODO: This case could be support in a canonicalization pattern. if (getDimReverse(shapeCastOp.getSourceVectorType(), i) != diff --git a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp --- a/mlir/lib/Interfaces/DataLayoutInterfaces.cpp +++ b/mlir/lib/Interfaces/DataLayoutInterfaces.cpp @@ -479,7 +479,7 @@ return failure(); // Second, dispatch verifications of entry groups to types or dialects they - // are are associated with. + // are associated with. DenseMap types; DenseMap ids; spec.bucketEntriesByType(types, ids); diff --git a/mlir/test/Dialect/Affine/unroll.mlir b/mlir/test/Dialect/Affine/unroll.mlir --- a/mlir/test/Dialect/Affine/unroll.mlir +++ b/mlir/test/Dialect/Affine/unroll.mlir @@ -522,7 +522,7 @@ // UNROLL-BY-4-NEXT: %2 = "foo"() : () -> i32 // UNROLL-BY-4-NEXT: %3 = "foo"() : () -> i32 // UNROLL-BY-4-NEXT: } - // A cleanup loop will be be generated here. + // A cleanup loop will be generated here. 
// UNROLL-BY-4-NEXT: affine.for %arg2 = #map{{[0-9]*}}()[%arg0] to %arg0 { // UNROLL-BY-4-NEXT: %0 = "foo"() : () -> i32 // UNROLL-BY-4-NEXT: } diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir --- a/mlir/test/IR/parser.mlir +++ b/mlir/test/IR/parser.mlir @@ -1348,7 +1348,7 @@ // CHECK: [[VAL2:%.*]]:3 = "bar"([[VAL3:%.*]]) : (i64) -> (i1, i1, i1) // CHECK: [[VAL3]] = "baz"([[VAL2]]#0) : (i1) -> i64 test.graph_region { - // %1 OK here in in graph region. + // %1 OK here in graph region. %2:3 = "bar"(%1) : (i64) -> (i1,i1,i1) %1 = "baz"(%2#0) : (i1) -> (i64) } diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir --- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir +++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir @@ -208,7 +208,7 @@ %1 = llvm.icmp "slt" %arg0, %0 : i32 omp.parallel if(%1 : i1) { - // The "parallel" operation will be outlined, check the the function is + // The "parallel" operation will be outlined, check that the function is // produced. Inside that function, further allocas should be placed before // another "icmp". // CHECK: define diff --git a/mlir/test/lib/Reducer/MLIRTestReducer.cpp b/mlir/test/lib/Reducer/MLIRTestReducer.cpp --- a/mlir/test/lib/Reducer/MLIRTestReducer.cpp +++ b/mlir/test/lib/Reducer/MLIRTestReducer.cpp @@ -18,7 +18,7 @@ namespace { -/// This pass looks for for the presence of an operation with the name +/// This pass looks for the presence of an operation with the name /// "crashOp" in the input MLIR file and crashes the mlir-opt tool if the /// operation is found. struct TestReducer : public PassWrapper> { diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp --- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp @@ -2291,7 +2291,7 @@ auto *method = opClass.addMethod("void", "getCanonicalizationPatterns", kind, std::move(paramList)); - // If synthesizing the method, fill it it. 
+ // If synthesizing the method, fill it. if (hasBody) { ERROR_IF_PRUNED(method, "getCanonicalizationPatterns", op); method->body() << " results.add(canonicalize);\n"; diff --git a/mlir/unittests/Analysis/Presburger/PresburgerSetTest.cpp b/mlir/unittests/Analysis/Presburger/PresburgerSetTest.cpp --- a/mlir/unittests/Analysis/Presburger/PresburgerSetTest.cpp +++ b/mlir/unittests/Analysis/Presburger/PresburgerSetTest.cpp @@ -525,7 +525,7 @@ // Triangle with vertices (0, 0), (5, 0), (15, 5). // Projected on x, it becomes [0, 13] U {15} as it becomes too narrow towards - // the apex and so does not have have any integer point at x = 14. + // the apex and so does not have any integer point at x = 14. // At x = 15, the apex is an integer point. PresburgerSet triangle2{ parseIntegerPolyhedronAndMakeLocals("(x,y) : (y >= 0, " diff --git a/mlir/unittests/TableGen/OpBuildGen.cpp b/mlir/unittests/TableGen/OpBuildGen.cpp --- a/mlir/unittests/TableGen/OpBuildGen.cpp +++ b/mlir/unittests/TableGen/OpBuildGen.cpp @@ -131,7 +131,7 @@ /// single variadic arg x /// {single variadic result, non-variadic result, multiple variadic results} /// -/// Specifically to test that that ODS framework does not generate ambiguous +/// Specifically to test that ODS framework does not generate ambiguous /// build() methods that fail to compile. /// Test build methods for an Op with a single varadic arg and a single diff --git a/openmp/libomptarget/DeviceRTL/include/Synchronization.h b/openmp/libomptarget/DeviceRTL/include/Synchronization.h --- a/openmp/libomptarget/DeviceRTL/include/Synchronization.h +++ b/openmp/libomptarget/DeviceRTL/include/Synchronization.h @@ -35,7 +35,7 @@ ///{ #pragma omp begin assumes ext_aligned_barrier -/// Synchronize all threads in a block, they are are reaching the same +/// Synchronize all threads in a block, they are reaching the same /// instruction (hence all threads in the block are "aligned"). 
__attribute__((noinline)) void threadsAligned(); diff --git a/openmp/libomptarget/plugins-nextgen/common/PluginInterface/GlobalHandler.cpp b/openmp/libomptarget/plugins-nextgen/common/PluginInterface/GlobalHandler.cpp --- a/openmp/libomptarget/plugins-nextgen/common/PluginInterface/GlobalHandler.cpp +++ b/openmp/libomptarget/plugins-nextgen/common/PluginInterface/GlobalHandler.cpp @@ -99,7 +99,7 @@ return Plugin::error("Unable to create ELF object for image %p", Image.getStart()); - // Search the ELF symbol using the the symbol name. + // Search the ELF symbol using the symbol name. auto SymOrErr = getELFSymbol(*ELFObj, ImageGlobal.getName()); if (!SymOrErr) return Plugin::error("Failed ELF lookup of global '%s': %s", diff --git a/openmp/runtime/src/kmp_affinity.cpp b/openmp/runtime/src/kmp_affinity.cpp --- a/openmp/runtime/src/kmp_affinity.cpp +++ b/openmp/runtime/src/kmp_affinity.cpp @@ -2728,7 +2728,7 @@ // Set the array sizes for the hierarchy layers static void __kmp_dispatch_set_hierarchy_values() { // Set the maximum number of L1's to number of cores - // Set the maximum number of L2's to to either number of cores / 2 for + // Set the maximum number of L2's to either number of cores / 2 for // Intel(R) Xeon Phi(TM) coprocessor formally codenamed Knights Landing // Or the number of cores for Intel(R) Xeon(R) processors // Set the maximum number of NUMA nodes and L3's to number of packages @@ -3223,7 +3223,7 @@ return false; } - // If the thread ids were not specified and we see entries entries that + // If the thread ids were not specified and we see entries that // are duplicates, start the loop over and assign the thread ids manually. 
assign_thread_ids = true; goto restart_radix_check; diff --git a/openmp/runtime/src/kmp_itt.inl b/openmp/runtime/src/kmp_itt.inl --- a/openmp/runtime/src/kmp_itt.inl +++ b/openmp/runtime/src/kmp_itt.inl @@ -438,7 +438,7 @@ KMP_BUILD_ASSERT(sizeof(kmp_team_t) >= bs_last_barrier); // This condition is a must (we would have zero divide otherwise). KMP_BUILD_ASSERT(sizeof(kmp_team_t) >= 2 * bs_last_barrier); - // More strong condition: make sure we have room at least for for two + // More strong condition: make sure we have room at least for two // different ids (for each barrier type). object = reinterpret_cast( (kmp_uintptr_t)(team) + diff --git a/openmp/runtime/src/kmp_lock.cpp b/openmp/runtime/src/kmp_lock.cpp --- a/openmp/runtime/src/kmp_lock.cpp +++ b/openmp/runtime/src/kmp_lock.cpp @@ -3809,7 +3809,7 @@ sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1)); table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table; // We cannot free the previous table now, since it may be in use by other - // threads. So save the pointer to the previous table in in the first + // threads. So save the pointer to the previous table in the first // element of the new table. All the tables will be organized into a list, // and could be freed when library shutting down. __kmp_user_lock_table.table = table; diff --git a/openmp/runtime/src/kmp_settings.cpp b/openmp/runtime/src/kmp_settings.cpp --- a/openmp/runtime/src/kmp_settings.cpp +++ b/openmp/runtime/src/kmp_settings.cpp @@ -1559,7 +1559,7 @@ static void __kmp_stg_parse_debug_buf(char const *name, char const *value, void *data) { __kmp_stg_parse_bool(name, value, &__kmp_debug_buf); - // !!! TODO: Move buffer initialization of of this file! It may works + // !!! TODO: Move buffer initialization of this file! It may works // incorrectly if KMP_DEBUG_BUF is parsed before KMP_DEBUG_BUF_LINES or // KMP_DEBUG_BUF_CHARS. 
if (__kmp_debug_buf) { diff --git a/polly/include/polly/CodeGen/RuntimeDebugBuilder.h b/polly/include/polly/CodeGen/RuntimeDebugBuilder.h --- a/polly/include/polly/CodeGen/RuntimeDebugBuilder.h +++ b/polly/include/polly/CodeGen/RuntimeDebugBuilder.h @@ -38,7 +38,7 @@ /// @return A global containing @p Str. static llvm::Value *getPrintableString(PollyIRBuilder &Builder, llvm::StringRef Str) { - // TODO: Get rid of magic number 4. It it NVPTX's constant address space and + // TODO: Get rid of magic number 4. It is NVPTX's constant address space and // works on X86 (CPU) only because its backend ignores the address space. return Builder.CreateGlobalStringPtr(Str, "", 4); } diff --git a/polly/include/polly/ScopInfo.h b/polly/include/polly/ScopInfo.h --- a/polly/include/polly/ScopInfo.h +++ b/polly/include/polly/ScopInfo.h @@ -509,7 +509,7 @@ /// Here not all iterations access the same memory location, but iterations /// for which j = 0 holds do. After lifting the equality check in ScopBuilder, /// subsequent transformations do not only need check if a statement is - /// reduction like, but they also need to verify that that the reduction + /// reduction like, but they also need to verify that the reduction /// property is only exploited for statement instances that load from and /// store to the same data location. Doing so at dependence analysis time /// could allow us to handle the above example. diff --git a/polly/lib/Transform/MatmulOptimizer.cpp b/polly/lib/Transform/MatmulOptimizer.cpp --- a/polly/lib/Transform/MatmulOptimizer.cpp +++ b/polly/lib/Transform/MatmulOptimizer.cpp @@ -566,7 +566,7 @@ /// /// We create the BLIS macro-kernel by applying a combination of tiling /// of dimensions of the band node and interchanging of two innermost -/// modified dimensions. The values of of MacroKernelParams's fields are used +/// modified dimensions. The values of MacroKernelParams's fields are used /// as tile sizes. /// /// @param Node The schedule node to be modified. 
diff --git a/third-party/benchmark/include/benchmark/benchmark.h b/third-party/benchmark/include/benchmark/benchmark.h --- a/third-party/benchmark/include/benchmark/benchmark.h +++ b/third-party/benchmark/include/benchmark/benchmark.h @@ -971,7 +971,7 @@ // Have "setup" and/or "teardown" invoked once for every benchmark run. // If the benchmark is multi-threaded (will run in k threads concurrently), - // the setup callback will be be invoked exactly once (not k times) before + // the setup callback will be invoked exactly once (not k times) before // each run with k threads. Time allowing (e.g. for a short benchmark), there // may be multiple such runs per benchmark, each run with its own // "setup"/"teardown". diff --git a/third-party/benchmark/test/complexity_test.cc b/third-party/benchmark/test/complexity_test.cc --- a/third-party/benchmark/test/complexity_test.cc +++ b/third-party/benchmark/test/complexity_test.cc @@ -26,7 +26,7 @@ AddCases( TC_ConsoleOut, {{"^%bigo_name %bigo_str %bigo_str[ ]*$"}, - {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. + {"^%bigo_name", MR_Not}, // Assert we didn't only matched a name. {"^%rms_name %rms %rms[ ]*$", MR_Next}}); AddCases( TC_JSONOut,