diff --git a/bolt/lib/Passes/IndirectCallPromotion.cpp b/bolt/lib/Passes/IndirectCallPromotion.cpp --- a/bolt/lib/Passes/IndirectCallPromotion.cpp +++ b/bolt/lib/Passes/IndirectCallPromotion.cpp @@ -416,15 +416,15 @@ ++TotalIndexBasedCandidates; - auto ErrorOrMemAccesssProfile = + auto ErrorOrMemAccessProfile = BC.MIB->tryGetAnnotationAs(*MemLocInstr, "MemoryAccessProfile"); - if (!ErrorOrMemAccesssProfile) { + if (!ErrorOrMemAccessProfile) { DEBUG_VERBOSE(1, dbgs() << "BOLT-INFO: ICP no memory profiling data found\n"); return JumpTableInfoType(); } - MemoryAccessProfile &MemAccessProfile = ErrorOrMemAccesssProfile.get(); + MemoryAccessProfile &MemAccessProfile = ErrorOrMemAccessProfile.get(); uint64_t ArrayStart; if (DispExpr) { @@ -670,15 +670,15 @@ }); // Try to get value profiling data for the method load instruction. - auto ErrorOrMemAccesssProfile = + auto ErrorOrMemAccessProfile = BC.MIB->tryGetAnnotationAs(*MethodFetchInsns.back(), "MemoryAccessProfile"); - if (!ErrorOrMemAccesssProfile) { + if (!ErrorOrMemAccessProfile) { DEBUG_VERBOSE(1, dbgs() << "BOLT-INFO: ICP no memory profiling data found\n"); return MethodInfoType(); } - MemoryAccessProfile &MemAccessProfile = ErrorOrMemAccesssProfile.get(); + MemoryAccessProfile &MemAccessProfile = ErrorOrMemAccessProfile.get(); // Find the vtable that each method belongs to. std::map MethodToVtable; diff --git a/bolt/lib/Passes/ReorderData.cpp b/bolt/lib/Passes/ReorderData.cpp --- a/bolt/lib/Passes/ReorderData.cpp +++ b/bolt/lib/Passes/ReorderData.cpp @@ -186,14 +186,14 @@ for (const BinaryBasicBlock &BB : BF) { for (const MCInst &Inst : BB) { - auto ErrorOrMemAccesssProfile = + auto ErrorOrMemAccessProfile = BC.MIB->tryGetAnnotationAs( Inst, "MemoryAccessProfile"); - if (!ErrorOrMemAccesssProfile) + if (!ErrorOrMemAccessProfile) continue; const MemoryAccessProfile &MemAccessProfile = - ErrorOrMemAccesssProfile.get(); + ErrorOrMemAccessProfile.get(); for (const AddressAccess &AccessInfo : MemAccessProfile.AddressAccessInfo) { if (BinaryData *BD = AccessInfo.MemoryObject) { @@ -242,14 +242,14 @@ continue; for (const MCInst &Inst : BB) { - auto ErrorOrMemAccesssProfile = + auto ErrorOrMemAccessProfile = BC.MIB->tryGetAnnotationAs( Inst, "MemoryAccessProfile"); - if (!ErrorOrMemAccesssProfile) + if (!ErrorOrMemAccessProfile) continue; const MemoryAccessProfile &MemAccessProfile = - ErrorOrMemAccesssProfile.get(); + ErrorOrMemAccessProfile.get(); for (const AddressAccess &AccessInfo : MemAccessProfile.AddressAccessInfo) { if (AccessInfo.MemoryObject) diff --git a/bolt/lib/Passes/RetpolineInsertion.cpp b/bolt/lib/Passes/RetpolineInsertion.cpp --- a/bolt/lib/Passes/RetpolineInsertion.cpp +++ b/bolt/lib/Passes/RetpolineInsertion.cpp @@ -44,19 +44,17 @@ cl::Hidden, cl::cat(BoltCategory)); -cl::opt -R11Availability("r11-availability", - cl::desc("determine the availablity of r11 before indirect branches"), - cl::init(RetpolineInsertion::AvailabilityOptions::NEVER), - cl::values( - clEnumValN(RetpolineInsertion::AvailabilityOptions::NEVER, - "never", "r11 not available"), - clEnumValN(RetpolineInsertion::AvailabilityOptions::ALWAYS, - "always", "r11 avaialable before calls and jumps"), - clEnumValN(RetpolineInsertion::AvailabilityOptions::ABI, - "abi", "r11 avaialable before calls but not before jumps")), - cl::ZeroOrMore, - cl::cat(BoltCategory)); +cl::opt R11Availability( + "r11-availability", + cl::desc("determine the availability of r11 before indirect branches"), + cl::init(RetpolineInsertion::AvailabilityOptions::NEVER), + 
cl::values(clEnumValN(RetpolineInsertion::AvailabilityOptions::NEVER, + "never", "r11 not available"), + clEnumValN(RetpolineInsertion::AvailabilityOptions::ALWAYS, + "always", "r11 avaialable before calls and jumps"), + clEnumValN(RetpolineInsertion::AvailabilityOptions::ABI, "abi", + "r11 avaialable before calls but not before jumps")), + cl::ZeroOrMore, cl::cat(BoltCategory)); } // namespace opts diff --git a/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp b/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp --- a/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp +++ b/bolt/lib/RuntimeLibs/HugifyRuntimeLibrary.cpp @@ -46,7 +46,7 @@ errs() << "BOLT-ERROR: -hot-text should be applied to binaries with " "pre-compiled manual hugify support, while -hugify will add hugify " - "support automatcally. These two options cannot both be present.\n"; + "support automatically. These two options cannot both be present.\n"; exit(1); } // After the check, we set HotText to be true because automated hugify support diff --git a/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp b/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp --- a/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp +++ b/clang-tools-extra/clang-tidy/modernize/PassByValueCheck.cpp @@ -138,7 +138,7 @@ const int ParamIdx = Param->getFunctionScopeIndex(); const CXXRecordDecl *Record = Ctor->getParent(); - // Check whether a ctor `C` forms a pair with `Ctor` under the aforementionned + // Check whether a ctor `C` forms a pair with `Ctor` under the aforementioned // rules. const auto IsRValueOverload = [&Ctor, ParamIdx](const CXXConstructorDecl *C) { if (C == Ctor || C->isDeleted() || diff --git a/clang-tools-extra/clang-tidy/performance/TypePromotionInMathFnCheck.h b/clang-tools-extra/clang-tidy/performance/TypePromotionInMathFnCheck.h --- a/clang-tools-extra/clang-tidy/performance/TypePromotionInMathFnCheck.h +++ b/clang-tools-extra/clang-tidy/performance/TypePromotionInMathFnCheck.h @@ -19,7 +19,7 @@ /// Finds calls to C math library functions with implicit float to double /// promotions. /// -/// For example, warns on ::sin(0.f), because this funciton's parameter is a +/// For example, warns on ::sin(0.f), because this function's parameter is a /// double. You probably meant to call std::sin(0.f) (in C++), or sinf(0.f) (in /// C). /// diff --git a/clang-tools-extra/clangd/CodeComplete.h b/clang-tools-extra/clangd/CodeComplete.h --- a/clang-tools-extra/clangd/CodeComplete.h +++ b/clang-tools-extra/clangd/CodeComplete.h @@ -142,9 +142,9 @@ /// CompletionScore is NameMatch * pow(Base, Prediction). /// The optimal value of Base largely depends on the semantics of the model /// and prediction score (e.g. algorithm used during training, number of - /// trees, etc.). Usually if the range of Prediciton is [-20, 20] then a Base + /// trees, etc.). Usually if the range of Prediction is [-20, 20] then a Base /// in [1.2, 1.7] works fine. - /// Semantics: E.g. For Base = 1.3, if the Prediciton score reduces by 2.6 + /// Semantics: E.g. For Base = 1.3, if the Prediction score reduces by 2.6 /// points then completion score reduces by 50% or 1.3^(-2.6). float DecisionForestBase = 1.3f; }; diff --git a/clang-tools-extra/clangd/ConfigFragment.h b/clang-tools-extra/clangd/ConfigFragment.h --- a/clang-tools-extra/clangd/ConfigFragment.h +++ b/clang-tools-extra/clangd/ConfigFragment.h @@ -163,8 +163,8 @@ /// Flags added by the same CompileFlags entry will not be removed. 
std::vector> Remove; - /// Directory to search for compilation database (compile_comands.json etc). - /// Valid values are: + /// Directory to search for compilation database (compile_commands.json + /// etc). Valid values are: /// - A single path to a directory (absolute, or relative to the fragment) /// - Ancestors: search all parent directories (the default) /// - None: do not use a compilation database, just default flags. diff --git a/clang-tools-extra/clangd/DumpAST.cpp b/clang-tools-extra/clangd/DumpAST.cpp --- a/clang-tools-extra/clangd/DumpAST.cpp +++ b/clang-tools-extra/clangd/DumpAST.cpp @@ -111,7 +111,7 @@ // Attr just uses a weird method name. Maybe we should fix it instead? SourceRange getSourceRange(const Attr *Node) { return Node->getRange(); } - // Kind is usualy the class name, without the suffix ("Type" etc). + // Kind is usually the class name, without the suffix ("Type" etc). // Where there's a set of variants instead, we use the 'Kind' enum values. std::string getKind(const Decl *D) { return D->getDeclKindName(); } diff --git a/clang-tools-extra/clangd/HeuristicResolver.cpp b/clang-tools-extra/clangd/HeuristicResolver.cpp --- a/clang-tools-extra/clangd/HeuristicResolver.cpp +++ b/clang-tools-extra/clangd/HeuristicResolver.cpp @@ -120,8 +120,8 @@ return {}; if (const auto *BT = BaseType->getAs()) { // If BaseType is the type of a dependent expression, it's just - // represented as BultinType::Dependent which gives us no information. We - // can get further by analyzing the depedent expression. + // represented as BuiltinType::Dependent which gives us no information. We + // can get further by analyzing the dependent expression. Expr *Base = ME->isImplicitAccess() ? nullptr : ME->getBase(); if (Base && BT->getKind() == BuiltinType::Dependent) { BaseType = resolveExprToType(Base); diff --git a/clang-tools-extra/clangd/IncludeCleaner.cpp b/clang-tools-extra/clangd/IncludeCleaner.cpp --- a/clang-tools-extra/clangd/IncludeCleaner.cpp +++ b/clang-tools-extra/clangd/IncludeCleaner.cpp @@ -286,7 +286,7 @@ return false; } for (auto &Filter : Cfg.Diagnostics.Includes.IgnoreHeader) { - // Convert the path to Unix slashes and try to match aginast the fiilter. + // Convert the path to Unix slashes and try to match against the filter. llvm::SmallString<64> Path(Inc.Resolved); llvm::sys::path::native(Path, llvm::sys::path::Style::posix); if (Filter(Inc.Resolved)) { diff --git a/clang-tools-extra/clangd/InlayHints.cpp b/clang-tools-extra/clangd/InlayHints.cpp --- a/clang-tools-extra/clangd/InlayHints.cpp +++ b/clang-tools-extra/clangd/InlayHints.cpp @@ -383,7 +383,7 @@ if (!Cfg.InlayHints.Parameters || Args.size() == 0 || !Callee) return; - // If the anchor location comes from a macro defintion, there's nowhere to + // If the anchor location comes from a macro definition, there's nowhere to // put hints. if (!AST.getSourceManager().getTopMacroCallerLoc(Anchor).isFileID()) return; diff --git a/clang-tools-extra/clangd/Quality.cpp b/clang-tools-extra/clangd/Quality.cpp --- a/clang-tools-extra/clangd/Quality.cpp +++ b/clang-tools-extra/clangd/Quality.cpp @@ -570,7 +570,7 @@ DecisionForestScores Scores; // Exponentiating DecisionForest prediction makes the score of each tree a // multiplciative boost (like NameMatch). This allows us to weigh the - // prediciton score and NameMatch appropriately. + // prediction score and NameMatch appropriately. 
Scores.ExcludingName = pow(Base, Evaluate(E)); // Following cases are not part of the generated training dataset: // - Symbols with `NeedsFixIts`. diff --git a/clang-tools-extra/clangd/SemanticHighlighting.cpp b/clang-tools-extra/clangd/SemanticHighlighting.cpp --- a/clang-tools-extra/clangd/SemanticHighlighting.cpp +++ b/clang-tools-extra/clangd/SemanticHighlighting.cpp @@ -925,7 +925,7 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, HighlightingModifier K) { switch (K) { case HighlightingModifier::Declaration: - return OS << "decl"; // abbrevation for common case + return OS << "decl"; // abbreviation for common case default: return OS << toSemanticTokenModifier(K); } diff --git a/clang-tools-extra/clangd/benchmarks/CompletionModel/DecisionForestBenchmark.cpp b/clang-tools-extra/clangd/benchmarks/CompletionModel/DecisionForestBenchmark.cpp --- a/clang-tools-extra/clangd/benchmarks/CompletionModel/DecisionForestBenchmark.cpp +++ b/clang-tools-extra/clangd/benchmarks/CompletionModel/DecisionForestBenchmark.cpp @@ -61,7 +61,7 @@ return Examples; } -void runDecisionForestPrediciton(const std::vector Examples) { +void runDecisionForestPrediction(const std::vector Examples) { for (const Example &E : Examples) Evaluate(E); } @@ -72,7 +72,7 @@ State.PauseTiming(); const std::vector Examples = generateRandomDataset(1000000); State.ResumeTiming(); - runDecisionForestPrediciton(Examples); + runDecisionForestPrediction(Examples); } } BENCHMARK(decisionForestPredict); diff --git a/clang-tools-extra/clangd/index/CanonicalIncludes.h b/clang-tools-extra/clangd/index/CanonicalIncludes.h --- a/clang-tools-extra/clangd/index/CanonicalIncludes.h +++ b/clang-tools-extra/clangd/index/CanonicalIncludes.h @@ -78,7 +78,7 @@ /// - export: this is common and potentially interesting, there are three cases: /// * Points to a public header (common): we can suppress include2 if you /// already have include1. Only marginally useful. -/// * Points to a private header annotated with `private` (somewhat commmon): +/// * Points to a private header annotated with `private` (somewhat common): /// Not incrementally useful as we support private. /// * Points to a private header without pragmas (rare). This is a reversed /// private pragma, and is valuable but too rare to be worthwhile. diff --git a/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp b/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp --- a/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp +++ b/clang-tools-extra/clangd/unittests/TUSchedulerTests.cpp @@ -1035,7 +1035,7 @@ // Starts handling the update action and blocks until the // first preamble is built. ASTAction::RunningAction, - // Afterwqards it builds an AST for that preamble to publish + // Afterwards it builds an AST for that preamble to publish // diagnostics. ASTAction::Building, // Then goes idle. diff --git a/clang-tools-extra/clangd/unittests/XRefsTests.cpp b/clang-tools-extra/clangd/unittests/XRefsTests.cpp --- a/clang-tools-extra/clangd/unittests/XRefsTests.cpp +++ b/clang-tools-extra/clangd/unittests/XRefsTests.cpp @@ -1801,7 +1801,7 @@ } } -TEST(FindImplementations, CaptureDefintion) { +TEST(FindImplementations, CaptureDefinition) { llvm::StringRef Test = R"cpp( struct Base { virtual void F^oo(); diff --git a/clang-tools-extra/pseudo/gen/Main.cpp b/clang-tools-extra/pseudo/gen/Main.cpp --- a/clang-tools-extra/pseudo/gen/Main.cpp +++ b/clang-tools-extra/pseudo/gen/Main.cpp @@ -65,7 +65,7 @@ // Mangles a symbol name into a valid identifier. 
// // These follow names in the grammar fairly closely: -// nonterminal: `ptr-declartor` becomes `ptr_declarator`; +// nonterminal: `ptr-declarator` becomes `ptr_declarator`; // punctuator: `,` becomes `COMMA`; // keyword: `INT` becomes `INT`; // terminal: `IDENTIFIER` becomes `IDENTIFIER`; diff --git a/clang-tools-extra/pseudo/lib/cxx/cxx.bnf b/clang-tools-extra/pseudo/lib/cxx/cxx.bnf --- a/clang-tools-extra/pseudo/lib/cxx/cxx.bnf +++ b/clang-tools-extra/pseudo/lib/cxx/cxx.bnf @@ -1,7 +1,7 @@ # This is a C++ grammar from the C++ standard [1]. # # The grammar is a superset of the true grammar requring semantic constraints to -# resolve ambiguties. The grammar is context-free and ambiguous (beyond the +# resolve ambiguities. The grammar is context-free and ambiguous (beyond the # limit of LR(k)). We use general parsing algorithm (e.g GLR) to handle the # grammar and generate a transition table which is used to drive the parsing. # diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp --- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/virtual-class-destructor.cpp @@ -143,7 +143,7 @@ // CHECK-FIXES: virtual ~PublicNonVirtualBaseClass() {} }; -class PublicNonVirtualNonBaseClass { // OK accoring to C.35, since this class does not have any virtual methods. +class PublicNonVirtualNonBaseClass { // OK according to C.35, since this class does not have any virtual methods. void f(); public: diff --git a/clang/bindings/python/clang/cindex.py b/clang/bindings/python/clang/cindex.py --- a/clang/bindings/python/clang/cindex.py +++ b/clang/bindings/python/clang/cindex.py @@ -1152,7 +1152,7 @@ # Objective-C's @synchronized statement. CursorKind.OBJC_AT_SYNCHRONIZED_STMT = CursorKind(220) -# Objective-C's autorealease pool statement. +# Objective-C's autorelease pool statement. CursorKind.OBJC_AUTORELEASE_POOL_STMT = CursorKind(221) # Objective-C's for collection statement. diff --git a/clang/cmake/caches/MultiDistributionExample.cmake b/clang/cmake/caches/MultiDistributionExample.cmake --- a/clang/cmake/caches/MultiDistributionExample.cmake +++ b/clang/cmake/caches/MultiDistributionExample.cmake @@ -1,5 +1,5 @@ # This file sets up a CMakeCache for a simple build with multiple distributions. -# Note that for a real distribution, you likely want to perform a boostrap +# Note that for a real distribution, you likely want to perform a bootstrap # build; see clang/cmake/caches/DistributionExample.cmake and the # BuildingADistribution documentation for details. This cache file doesn't # demonstrate bootstrapping so it can focus on the configuration details diff --git a/clang/docs/ClangFormat.rst b/clang/docs/ClangFormat.rst --- a/clang/docs/ClangFormat.rst +++ b/clang/docs/ClangFormat.rst @@ -317,7 +317,7 @@ -v, --verbose be more verbose, ineffective without -i -style STYLE formatting style to apply (LLVM, GNU, Google, Chromium, Microsoft, Mozilla, WebKit) -fallback-style FALLBACK_STYLE - The name of the predefined style used as afallback in case clang-format is invoked with-style=file, but can not + The name of the predefined style used as a fallback in case clang-format is invoked with-style=file, but can not find the .clang-formatfile to use. 
-binary BINARY location of binary to use for clang-format diff --git a/clang/docs/JSONCompilationDatabase.rst b/clang/docs/JSONCompilationDatabase.rst --- a/clang/docs/JSONCompilationDatabase.rst +++ b/clang/docs/JSONCompilationDatabase.rst @@ -29,7 +29,7 @@ Supported Systems ================= -Clang has the ablity to generate compilation database fragments via +Clang has the ability to generate compilation database fragments via the :option:`-MJ argument >`. You can concatenate those fragments together between ``[`` and ``]`` to create a compilation database. diff --git a/clang/docs/LanguageExtensions.rst b/clang/docs/LanguageExtensions.rst --- a/clang/docs/LanguageExtensions.rst +++ b/clang/docs/LanguageExtensions.rst @@ -3006,7 +3006,7 @@ Once this builtin is evaluated in a constexpr context, it is erroneous to use it in an instantiation which changes its value. -In order to produce the unique name, the current implementation of the bultin +In order to produce the unique name, the current implementation of the builtin uses Itanium mangling even if the host compilation uses a different name mangling scheme at runtime. The mangler marks all the lambdas required to name the SYCL kernel and emits a stable local ordering of the respective lambdas. diff --git a/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst b/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst --- a/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst +++ b/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst @@ -210,7 +210,7 @@ The compiler invocation is a shell command that could be used to compile the TU-s main source file. The mapping from absolute source file paths of a TU to lists of compilation command segments used to compile said TU are given in YAML format referred to as `invocation list`, and must be passed as an -analyer-config argument. +analyzer-config argument. The index, which maps function USR names to source files containing them must also be generated by the `clang-extdef-mapping`. Entries in the index must *not* have an `.ast` suffix if the goal is to use On-demand analysis, as that extension signals that the entry is to be used as an PCH-dump. diff --git a/clang/include/clang/AST/DeclCXX.h b/clang/include/clang/AST/DeclCXX.h --- a/clang/include/clang/AST/DeclCXX.h +++ b/clang/include/clang/AST/DeclCXX.h @@ -1916,7 +1916,7 @@ ExplicitSpecifier getExplicitSpecifier() { return ExplicitSpec; } const ExplicitSpecifier getExplicitSpecifier() const { return ExplicitSpec; } - /// Return true if the declartion is already resolved to be explicit. + /// Return true if the declaration is already resolved to be explicit. bool isExplicit() const { return ExplicitSpec.isExplicit(); } /// Get the template for which this guide performs deduction. @@ -2512,7 +2512,7 @@ return getCanonicalDecl()->getExplicitSpecifierInternal(); } - /// Return true if the declartion is already resolved to be explicit. + /// Return true if the declaration is already resolved to be explicit. bool isExplicit() const { return getExplicitSpecifier().isExplicit(); } /// Iterates through the member/base initializer list. @@ -2796,7 +2796,7 @@ return getCanonicalDecl()->ExplicitSpec; } - /// Return true if the declartion is already resolved to be explicit. + /// Return true if the declaration is already resolved to be explicit. 
bool isExplicit() const { return getExplicitSpecifier().isExplicit(); } void setExplicitSpecifier(ExplicitSpecifier ES) { ExplicitSpec = ES; } diff --git a/clang/include/clang/AST/OpenMPClause.h b/clang/include/clang/AST/OpenMPClause.h --- a/clang/include/clang/AST/OpenMPClause.h +++ b/clang/include/clang/AST/OpenMPClause.h @@ -8391,14 +8391,14 @@ /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. - /// \param N Number of allocators asssociated with the clause. + /// \param N Number of allocators associated with the clause. OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc), LParenLoc(LParenLoc), NumOfAllocators(N) {} /// Build an empty clause. - /// \param N Number of allocators asssociated with the clause. + /// \param N Number of allocators associated with the clause. /// explicit OMPUsesAllocatorsClause(unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(), @@ -8492,14 +8492,14 @@ /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. - /// \param N Number of locators asssociated with the clause. + /// \param N Number of locators associated with the clause. OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause(llvm::omp::OMPC_affinity, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. - /// \param N Number of locators asssociated with the clause. + /// \param N Number of locators associated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause(llvm::omp::OMPC_affinity, diff --git a/clang/include/clang/Analysis/ConstructionContext.h b/clang/include/clang/Analysis/ConstructionContext.h --- a/clang/include/clang/Analysis/ConstructionContext.h +++ b/clang/include/clang/Analysis/ConstructionContext.h @@ -542,7 +542,7 @@ /// of being immediately copied by an elidable copy/move constructor. /// For example, T t = T(123); includes a temporary T(123) that is immediately /// copied to variable t. In such cases the elidable copy can (but not -/// necessarily should) be omitted ("elided") accodring to the rules of the +/// necessarily should) be omitted ("elided") according to the rules of the /// language; the constructor would then construct variable t directly. /// This construction context contains information of the elidable constructor /// and its respective construction context. diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h b/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h --- a/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h +++ b/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h @@ -134,7 +134,7 @@ /// getBlockDataMap - Retrieves the internal map between CFGBlocks and /// dataflow values. If the dataflow analysis operates in the forward /// direction, the values correspond to the dataflow values at the start - /// of the block. Otherwise, for a backward analysis, the values correpsond + /// of the block. Otherwise, for a backward analysis, the values correspond /// to the dataflow values at the end of the block. 
BlockDataMapTy& getBlockDataMap() { return BlockDataMap; } const BlockDataMapTy& getBlockDataMap() const { return BlockDataMap; } diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td --- a/clang/include/clang/Basic/AttrDocs.td +++ b/clang/include/clang/Basic/AttrDocs.td @@ -4389,7 +4389,7 @@ applied to a definition. If ``allow_templates`` is given, template function definitions are considered as specializations of existing or assumed template declarations with the same name. The template parameters for the base functions -are used to instantiate the specialization. If ``bind_to_declartion`` is given, +are used to instantiate the specialization. If ``bind_to_declaration`` is given, apply the same variant rules to function declarations. This allows the user to override declarations with only a function declaration. }]; @@ -6106,7 +6106,7 @@ does_not_exist x; // error: use of unresolved 'using_if_exists' -The C++ spelling of the attribte (`[[clang::using_if_exists]]`) is also +The C++ spelling of the attribute (`[[clang::using_if_exists]]`) is also supported as a clang extension, since ISO C++ doesn't support attributes in this position. If the entity referred to by the using-declaration is found by name lookup, the attribute has no effect. This attribute is useful for libraries @@ -6444,7 +6444,7 @@ command-line arguments. - ``used`` only zeros call-used registers used in the function. By ``used``, we mean a register whose contents have been set or referenced in the function. -- ``used-gpr`` only zeros call-used GPR registers used in the funciton. +- ``used-gpr`` only zeros call-used GPR registers used in the function. - ``used-arg`` only zeros call-used registers used to pass arguments to the function. - ``used-gpr-arg`` only zeros call-used GPR registers used to pass arguments to @@ -6456,7 +6456,7 @@ - ``all-gpr-arg`` zeros all call-used GPR registers used to pass arguments to the function. -The default for the attribute is contolled by the ``-fzero-call-used-regs`` +The default for the attribute is controlled by the ``-fzero-call-used-regs`` flag. .. _Return-Oriented Programming: https://en.wikipedia.org/wiki/Return-oriented_programming diff --git a/clang/include/clang/Basic/BuiltinsVE.def b/clang/include/clang/Basic/BuiltinsVE.def --- a/clang/include/clang/Basic/BuiltinsVE.def +++ b/clang/include/clang/Basic/BuiltinsVE.def @@ -15,7 +15,7 @@ # define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS) #endif -// The format of this database is decribed in clang/Basic/Builtins.def. +// The format of this database is described in clang/Basic/Builtins.def. BUILTIN(__builtin_ve_vl_pack_f32p, "ULifC*fC*", "n") BUILTIN(__builtin_ve_vl_pack_f32a, "ULifC*", "n") diff --git a/clang/include/clang/Basic/SourceManager.h b/clang/include/clang/Basic/SourceManager.h --- a/clang/include/clang/Basic/SourceManager.h +++ b/clang/include/clang/Basic/SourceManager.h @@ -1918,11 +1918,11 @@ } }; -/// SourceManager and necessary depdencies (e.g. VFS, FileManager) for a single -/// in-memorty file. +/// SourceManager and necessary dependencies (e.g. VFS, FileManager) for a +/// single in-memorty file. class SourceManagerForFile { public: - /// Creates SourceManager and necessary depdencies (e.g. VFS, FileManager). + /// Creates SourceManager and necessary dependencies (e.g. VFS, FileManager). /// The main file in the SourceManager will be \p FileName with \p Content. 
SourceManagerForFile(StringRef FileName, StringRef Content); diff --git a/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h b/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h --- a/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h +++ b/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h @@ -153,7 +153,7 @@ /// Serialize an Objective-C container record. void serializeObjCContainerRecord(const ObjCContainerRecord &Record); - /// Serialize a macro defintion record. + /// Serialize a macro definition record. void serializeMacroDefinitionRecord(const MacroDefinitionRecord &Record); /// Serialize a typedef record. diff --git a/clang/include/clang/Lex/DependencyDirectivesScanner.h b/clang/include/clang/Lex/DependencyDirectivesScanner.h --- a/clang/include/clang/Lex/DependencyDirectivesScanner.h +++ b/clang/include/clang/Lex/DependencyDirectivesScanner.h @@ -87,7 +87,7 @@ /// Represents a directive that's lexed as part of the dependency directives /// scanning. It's used to track various preprocessor directives that could -/// potentially have an effect on the depedencies. +/// potentially have an effect on the dependencies. struct Directive { ArrayRef Tokens; diff --git a/clang/include/clang/Sema/DeclSpec.h b/clang/include/clang/Sema/DeclSpec.h --- a/clang/include/clang/Sema/DeclSpec.h +++ b/clang/include/clang/Sema/DeclSpec.h @@ -1343,7 +1343,7 @@ /// DeclSpec for the function with the qualifier related info. DeclSpec *MethodQualifiers; - /// AtttibuteFactory for the MethodQualifiers. + /// AttributeFactory for the MethodQualifiers. AttributeFactory *QualAttrFactory; union { diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -306,7 +306,7 @@ /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// - /// Clients should be very careful when using this funciton, as it stores a + /// Clients should be very careful when using this function, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. /// @@ -4018,13 +4018,10 @@ DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( - ExprResult &SrcExpr, - bool DoFunctionPointerConverion = false, - bool Complain = false, - SourceRange OpRangeForComplaining = SourceRange(), - QualType DestTypeForComplaining = QualType(), - unsigned DiagIDForComplaining = 0); - + ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, + bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), + QualType DestTypeForComplaining = QualType(), + unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h --- a/clang/include/clang/Serialization/ASTReader.h +++ b/clang/include/clang/Serialization/ASTReader.h @@ -381,7 +381,7 @@ /// The AST file was written by a different version of Clang. VersionMismatch, - /// The AST file was writtten with a different language/target + /// The AST file was written with a different language/target /// configuration. 
ConfigurationMismatch, diff --git a/clang/include/clang/Serialization/SourceLocationEncoding.h b/clang/include/clang/Serialization/SourceLocationEncoding.h --- a/clang/include/clang/Serialization/SourceLocationEncoding.h +++ b/clang/include/clang/Serialization/SourceLocationEncoding.h @@ -10,7 +10,7 @@ // the size of typical serialized files. Storing them efficiently is important. // // We use integers optimized by VBR-encoding, because: -// - when abbrevations cannot be used, VBR6 encoding is our only choice +// - when abbreviations cannot be used, VBR6 encoding is our only choice // - in the worst case a SourceLocation can be ~any 32-bit number, but in // practice they are highly predictable // diff --git a/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h --- a/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h +++ b/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h @@ -153,7 +153,7 @@ /// Constructs a CheckerManager without requiring an AST. No checker /// registration will take place. Only useful when one needs to print the - /// help flags through CheckerRegistryData, and the AST is unavalaible. + /// help flags through CheckerRegistryData, and the AST is unavailable. CheckerManager(AnalyzerOptions &AOptions, const LangOptions &LangOpts, DiagnosticsEngine &Diags, ArrayRef plugins); diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h --- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h +++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h @@ -720,11 +720,11 @@ /// returned, which is better than nothing but does not represent /// the actual behavior of the program. The Idx parameter is used if we /// construct an array of objects. In that case it points to the index - /// of the continous memory region. + /// of the continuous memory region. /// E.g.: /// For `int arr[4]` this index can be 0,1,2,3. /// For `int arr2[3][3]` this index can be 0,1,...,7,8. - /// A multi-dimensional array is also a continous memory location in a + /// A multi-dimensional array is also a continuous memory location in a /// row major order, so for arr[0][0] Idx is 0 and for arr[2][2] Idx is 8. SVal computeObjectUnderConstruction(const Expr *E, ProgramStateRef State, const LocationContext *LCtx, diff --git a/clang/include/clang/Tooling/Core/Replacement.h b/clang/include/clang/Tooling/Core/Replacement.h --- a/clang/include/clang/Tooling/Core/Replacement.h +++ b/clang/include/clang/Tooling/Core/Replacement.h @@ -301,7 +301,7 @@ // applied. Replacements getCanonicalReplacements() const; - // If `R` and all existing replacements are order-indepedent, then merge it + // If `R` and all existing replacements are order-independent, then merge it // with `Replaces` and returns the merged replacements; otherwise, returns an // error. llvm::Expected diff --git a/clang/include/clang/Tooling/Syntax/Tree.h b/clang/include/clang/Tooling/Syntax/Tree.h --- a/clang/include/clang/Tooling/Syntax/Tree.h +++ b/clang/include/clang/Tooling/Syntax/Tree.h @@ -10,7 +10,7 @@ // - tree nodes correspond to language grammar constructs. // // The tree is initially built from an AST. Each node of a newly built tree -// covers a continous subrange of expanded tokens (i.e. tokens after +// covers a continuous subrange of expanded tokens (i.e. 
tokens after // preprocessing), the specific tokens coverered are stored in the leaf nodes of // a tree. A post-order traversal of a tree will visit leaf nodes in an order // corresponding the original order of expanded tokens. diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -2485,7 +2485,7 @@ } /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a -/// type, in characters, before alignment adustments. This method does +/// type, in characters, before alignment adjustments. This method does /// not work on incomplete types. CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp --- a/clang/lib/Analysis/CFG.cpp +++ b/clang/lib/Analysis/CFG.cpp @@ -1617,7 +1617,7 @@ } /// createBlock - Used to lazily create blocks that are connected -/// to the current (global) succcessor. +/// to the current (global) successor. CFGBlock *CFGBuilder::createBlock(bool add_successor) { CFGBlock *B = cfg->createBlock(); if (add_successor && Succ) diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -11934,7 +11934,7 @@ "unexpected BPF builtin"); // A sequence number, injected into IR builtin functions, to - // prevent CSE given the only difference of the funciton + // prevent CSE given the only difference of the function // may just be the debuginfo metadata. static uint32_t BuiltinSeqNum; @@ -16235,7 +16235,7 @@ auto Pair = EmitAtomicCompareExchange( LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(), llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true); - // Unlike c11's atomic_compare_exchange, accroding to + // Unlike c11's atomic_compare_exchange, according to // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp // > In either case, the contents of the memory location specified by addr // > are copied into the memory location specified by old_val_addr. diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -3530,7 +3530,7 @@ switch (RetAI.getKind()) { case ABIArgInfo::InAlloca: - // Aggregrates get evaluated directly into the destination. Sometimes we + // Aggregates get evaluated directly into the destination. Sometimes we // need to return the sret value in a register, though. assert(hasAggregateEvaluationKind(RetTy)); if (RetAI.getInAllocaSRet()) { @@ -3558,7 +3558,7 @@ break; } case TEK_Aggregate: - // Do nothing; aggregrates get evaluated directly into the destination. + // Do nothing; aggregates get evaluated directly into the destination. 
break; case TEK_Scalar: { LValueBaseInfo BaseInfo; diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -4655,7 +4655,7 @@ CodeGenFunction &CGF, QualType &KmpDependInfoTy, const OMPTaskDataTy::DependData &Data) { assert(Data.DepKind == OMPC_DEPEND_depobj && - "Expected depobj dependecy kind."); + "Expected depobj dependency kind."); SmallVector Sizes; SmallVector SizeLVals; ASTContext &C = CGF.getContext(); @@ -4695,7 +4695,7 @@ const OMPTaskDataTy::DependData &Data, Address DependenciesArray) { assert(Data.DepKind == OMPC_DEPEND_depobj && - "Expected depobj dependecy kind."); + "Expected depobj dependency kind."); llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy); { OMPIteratorGeneratorScope IteratorScope( @@ -4751,7 +4751,8 @@ llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0); llvm::Value *NumOfRegularWithIterators = llvm::ConstantInt::get(CGF.IntPtrTy, 0); - // Calculate number of depobj dependecies and regular deps with the iterators. + // Calculate number of depobj dependencies and regular deps with the + // iterators. for (const OMPTaskDataTy::DependData &D : Dependencies) { if (D.DepKind == OMPC_DEPEND_depobj) { SmallVector Sizes = @@ -4825,7 +4826,7 @@ emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I], DependenciesArray); } - // Copy regular dependecies with iterators. + // Copy regular dependencies with iterators. LValue PosLVal = CGF.MakeAddrLValue( CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType()); CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal); diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp @@ -1168,7 +1168,7 @@ namespace { LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE(); -/// Enum for accesseing the reserved_2 field of the ident_t struct. +/// Enum for accessing the reserved_2 field of the ident_t struct. enum ModeFlagsTy : unsigned { /// Bit set to 1 when in SPMD mode. KMP_IDENT_SPMD_MODE = 0x01, diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp --- a/clang/lib/CrossTU/CrossTranslationUnit.cpp +++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp @@ -458,7 +458,7 @@ return llvm::make_error(index_error_code::missing_definition); } - // Search in the index for the filename where the definition of FuncitonName + // Search in the index for the filename where the definition of FunctionName // resides. if (llvm::Expected FoundForFile = getASTUnitForFile(NameFileMap[FunctionName], DisplayCTUProgress)) { diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -5986,8 +5986,8 @@ } } else if (IsOpenMPDevice) { // When compiling for the OpenMP device we want protected visibility by - // default. This prevents the device from accidenally preempting code on the - // host, makes the system more robust, and improves performance. + // default. This prevents the device from accidentally preempting code on + // the host, makes the system more robust, and improves performance. 
CmdArgs.push_back("-fvisibility"); CmdArgs.push_back("protected"); } diff --git a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp --- a/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp +++ b/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp @@ -146,19 +146,19 @@ if (Avail.isDefault()) return None; - Object Availbility; - serializeObject(Availbility, "introducedVersion", + Object Availability; + serializeObject(Availability, "introducedVersion", serializeSemanticVersion(Avail.Introduced)); - serializeObject(Availbility, "deprecatedVersion", + serializeObject(Availability, "deprecatedVersion", serializeSemanticVersion(Avail.Deprecated)); - serializeObject(Availbility, "obsoletedVersion", + serializeObject(Availability, "obsoletedVersion", serializeSemanticVersion(Avail.Obsoleted)); if (Avail.isUnavailable()) - Availbility["isUnconditionallyUnavailable"] = true; + Availability["isUnconditionallyUnavailable"] = true; if (Avail.isUnconditionallyDeprecated()) - Availbility["isUnconditionallyDeprecated"] = true; + Availability["isUnconditionallyDeprecated"] = true; - return Availbility; + return Availability; } /// Get the language name string for interface language references. diff --git a/clang/lib/Format/UnwrappedLineParser.h b/clang/lib/Format/UnwrappedLineParser.h --- a/clang/lib/Format/UnwrappedLineParser.h +++ b/clang/lib/Format/UnwrappedLineParser.h @@ -208,7 +208,7 @@ // // NextTok specifies the next token. A null pointer NextTok is supported, and // signifies either the absence of a next token, or that the next token - // shouldn't be taken into accunt for the analysis. + // shouldn't be taken into account for the analysis. void distributeComments(const SmallVectorImpl &Comments, const FormatToken *NextTok); diff --git a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp --- a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp +++ b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp @@ -6723,7 +6723,7 @@ std::string &Result, ArrayRef Ivars, ObjCInterfaceDecl *CDecl) { - // FIXME. visibilty of offset symbols may have to be set; for Darwin + // FIXME. visibility of offset symbols may have to be set; for Darwin // this is what happens: /** if (Ivar->getAccessControl() == ObjCIvarDecl::Private || diff --git a/clang/lib/Headers/arm_acle.h b/clang/lib/Headers/arm_acle.h --- a/clang/lib/Headers/arm_acle.h +++ b/clang/lib/Headers/arm_acle.h @@ -277,7 +277,7 @@ /* * 9.4 Saturating intrinsics * - * FIXME: Change guard to their corrosponding __ARM_FEATURE flag when Q flag + * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag * intrinsics are implemented and the flag is enabled. 
*/ /* 9.4.1 Width-specified saturation intrinsics */ diff --git a/clang/lib/Headers/opencl-c.h b/clang/lib/Headers/opencl-c.h --- a/clang/lib/Headers/opencl-c.h +++ b/clang/lib/Headers/opencl-c.h @@ -17847,15 +17847,13 @@ uint skip_block_partition_type, uint skip_motion_vector_mask, ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment, intel_sub_group_avc_sic_payload_t payload); -intel_sub_group_avc_sic_payload_t __ovld -intel_sub_group_avc_sic_configure_ipe( - uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty, +intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe( + uchar luma_intra_partition_mask, uchar intra_neighbour_availability, uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel, uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels, uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload); -intel_sub_group_avc_sic_payload_t __ovld -intel_sub_group_avc_sic_configure_ipe( - uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty, +intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe( + uchar luma_intra_partition_mask, uchar intra_neighbour_availability, uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel, uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels, ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel, diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp --- a/clang/lib/Lex/Lexer.cpp +++ b/clang/lib/Lex/Lexer.cpp @@ -3368,7 +3368,7 @@ // recover after having emitted a diagnostic. if (!LooseMatch) return llvm::None; - // We do not offer missspelled character names suggestions here + // We do not offer misspelled character names suggestions here // as the set of what would be a valid suggestion depends on context, // and we should not make invalid suggestions. } diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp --- a/clang/lib/Parse/ParseStmt.cpp +++ b/clang/lib/Parse/ParseStmt.cpp @@ -956,7 +956,7 @@ /// StmtResult Parser::ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags) { - assert(Tok.is(tok::l_brace) && "Not a compount stmt!"); + assert(Tok.is(tok::l_brace) && "Not a compound stmt!"); // Enter a scope to hold everything within the compound stmt. Compound // statements can always hold declarations. diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp --- a/clang/lib/Sema/SemaCodeComplete.cpp +++ b/clang/lib/Sema/SemaCodeComplete.cpp @@ -1212,7 +1212,7 @@ enum class OverloadCompare { BothViable, Dominates, Dominated }; // Will Candidate ever be called on the object, when overloaded with Incumbent? // Returns Dominates if Candidate is always called, Dominated if Incumbent is -// always called, BothViable if either may be called dependending on arguments. +// always called, BothViable if either may be called depending on arguments. // Precondition: must actually be overloads! static OverloadCompare compareOverloads(const CXXMethodDecl &Candidate, const CXXMethodDecl &Incumbent, diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -3031,7 +3031,7 @@ continue; } else if (isa(NewAttribute)) { // We allow to add OMP[Begin]DeclareVariantAttr to be added to - // declarations after defintions. + // declarations after definitions. ++I; continue; } @@ -10564,7 +10564,7 @@ /// (from the current #pragma code-seg value). 
/// /// \param FD Function being declared. -/// \param IsDefinition Whether it is a definition or just a declarartion. +/// \param IsDefinition Whether it is a definition or just a declaration. /// \returns A CodeSegAttr or SectionAttr to apply to the function or /// nullptr if no attribute should be added. Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, @@ -15487,8 +15487,8 @@ // Because typo correction is expensive, only do it if the implicit // function declaration is going to be treated as an error. // - // Perform the corection before issuing the main diagnostic, as some consumers - // use typo-correction callbacks to enhance the main diagnostic. + // Perform the correction before issuing the main diagnostic, as some + // consumers use typo-correction callbacks to enhance the main diagnostic. if (S && !ExternCPrev && (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error)) { DeclFilterCCC CCC{}; @@ -19211,7 +19211,7 @@ else Diag(PrevDecl->getLocation(), diag::warn_redefine_extname_not_applied) << /*Variable*/(isa(PrevDecl) ? 0 : 1) << PrevDecl; - // Otherwise, add a label atttibute to ExtnameUndeclaredIdentifiers. + // Otherwise, add a label attribute to ExtnameUndeclaredIdentifiers. } else (void)ExtnameUndeclaredIdentifiers.insert(std::make_pair(Name, Attr)); } diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp --- a/clang/lib/Sema/SemaDeclObjC.cpp +++ b/clang/lib/Sema/SemaDeclObjC.cpp @@ -3754,7 +3754,7 @@ /// DiagnoseDuplicateIvars - /// Check for duplicate ivars in the entire class at the start of -/// \@implementation. This becomes necesssary because class extension can +/// \@implementation. This becomes necessary because class extension can /// add ivars to a class in random order which will not be known until /// class's \@implementation is seen. void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -12569,7 +12569,7 @@ /// Returns false if resolveAddressOfSingleOverloadCandidate fails. /// Otherwise, returns true. This may emit diagnostics and return true. bool Sema::resolveAndFixAddressOfSingleOverloadCandidate( - ExprResult &SrcExpr, bool DoFunctionPointerConverion) { + ExprResult &SrcExpr, bool DoFunctionPointerConversion) { Expr *E = SrcExpr.get(); assert(E->getType() == Context.OverloadTy && "SrcExpr must be an overload"); @@ -12585,7 +12585,7 @@ DiagnoseUseOfDecl(Found, E->getExprLoc()); CheckAddressOfMemberAccess(E, DAP); Expr *Fixed = FixOverloadedFunctionReference(E, DAP, Found); - if (DoFunctionPointerConverion && Fixed->getType()->isFunctionType()) + if (DoFunctionPointerConversion && Fixed->getType()->isFunctionType()) SrcExpr = DefaultFunctionArrayConversion(Fixed, /*Diagnose=*/false); else SrcExpr = Fixed; @@ -12687,10 +12687,9 @@ // expression, regardless of whether or not it succeeded. Always // returns true if 'complain' is set. 
bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization( - ExprResult &SrcExpr, bool doFunctionPointerConverion, - bool complain, SourceRange OpRangeForComplaining, - QualType DestTypeForComplaining, - unsigned DiagIDForComplaining) { + ExprResult &SrcExpr, bool doFunctionPointerConversion, bool complain, + SourceRange OpRangeForComplaining, QualType DestTypeForComplaining, + unsigned DiagIDForComplaining) { assert(SrcExpr.get()->getType() == Context.OverloadTy); OverloadExpr::FindResult ovl = OverloadExpr::find(SrcExpr.get()); @@ -12731,7 +12730,7 @@ FixOverloadedFunctionReference(SrcExpr.get(), found, fn); // If desired, do function-to-pointer decay. - if (doFunctionPointerConverion) { + if (doFunctionPointerConversion) { SingleFunctionExpression = DefaultFunctionArrayLvalueConversion(SingleFunctionExpression.get()); if (SingleFunctionExpression.isInvalid()) { diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp --- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -187,7 +187,7 @@ // This trait is responsible for storing the index of the element that is to be // constructed in the next iteration. As a result a CXXConstructExpr is only -// stored if it is array type. Also the index is the index of the continous +// stored if it is array type. Also the index is the index of the continuous // memory region, which is important for multi-dimensional arrays. E.g:: int // arr[2][2]; assume arr[1][1] will be the next element under construction, so // the index is 3. diff --git a/clang/lib/Tooling/AllTUsExecution.cpp b/clang/lib/Tooling/AllTUsExecution.cpp --- a/clang/lib/Tooling/AllTUsExecution.cpp +++ b/clang/lib/Tooling/AllTUsExecution.cpp @@ -121,7 +121,7 @@ [&](std::string Path) { Log("[" + std::to_string(Count()) + "/" + TotalNumStr + "] Processing file " + Path); - // Each thread gets an indepent copy of a VFS to allow different + // Each thread gets an independent copy of a VFS to allow different // concurrent working directories. IntrusiveRefCntPtr FS = llvm::vfs::createPhysicalFileSystem(); diff --git a/clang/lib/Tooling/Core/Replacement.cpp b/clang/lib/Tooling/Core/Replacement.cpp --- a/clang/lib/Tooling/Core/Replacement.cpp +++ b/clang/lib/Tooling/Core/Replacement.cpp @@ -270,7 +270,7 @@ assert(R.getLength() == 0); // `I` is also an insertion, `R` and `I` conflict. if (I->getLength() == 0) { - // Check if two insertions are order-indepedent: if inserting them in + // Check if two insertions are order-independent: if inserting them in // either order produces the same text, they are order-independent. if ((R.getReplacementText() + I->getReplacementText()).str() != (I->getReplacementText() + R.getReplacementText()).str()) @@ -319,7 +319,7 @@ Replaces.insert(R); } else { // `I` overlaps with `R`. We need to check `R` against all overlapping - // replacements to see if they are order-indepedent. If they are, merge `R` + // replacements to see if they are order-independent. If they are, merge `R` // with them and replace them with the merged replacements. auto MergeBegin = I; auto MergeEnd = std::next(I); diff --git a/clang/lib/Tooling/Syntax/Tokens.cpp b/clang/lib/Tooling/Syntax/Tokens.cpp --- a/clang/lib/Tooling/Syntax/Tokens.cpp +++ b/clang/lib/Tooling/Syntax/Tokens.cpp @@ -751,7 +751,7 @@ SpelledTokens[NextSpelled].location() <= KnownEnd) ++NextSpelled; FlushMapping(); // Emits [NextSpelled, KnownEnd] - // Now the loop contitues and will emit (KnownEnd, Target). 
+ // Now the loop continues and will emit (KnownEnd, Target). } else { ++NextSpelled; } diff --git a/clang/test/CodeGen/vectorcall.c b/clang/test/CodeGen/vectorcall.c --- a/clang/test/CodeGen/vectorcall.c +++ b/clang/test/CodeGen/vectorcall.c @@ -110,7 +110,7 @@ // Vectorcall in both architectures allows passing of an HVA as long as there is room, // even if it is not one of the first 6 arguments. First pass puts p4 into a // register on both. p9 ends up in a register in x86 only. Second pass puts p1 -// in a register, does NOT put p7 in a register (since theres no room), then puts +// in a register, does NOT put p7 in a register (since there's no room), then puts // p8 in a register. void __vectorcall HVAAnywhere(struct HFA2 p1, int p2, int p3, float p4, int p5, int p6, struct HFA4 p7, struct HFA2 p8, float p9){} // X32: define dso_local x86_vectorcallcc void @"\01HVAAnywhere@@88"(%struct.HFA2 inreg %p1.coerce, i32 inreg noundef %p2, i32 inreg noundef %p3, float inreg noundef %p4, i32 noundef %p5, i32 noundef %p6, %struct.HFA4* noundef %p7, %struct.HFA2 inreg %p8.coerce, float inreg noundef %p9) diff --git a/clang/test/CodeGenCXX/target-features-error.cpp b/clang/test/CodeGenCXX/target-features-error.cpp --- a/clang/test/CodeGenCXX/target-features-error.cpp +++ b/clang/test/CodeGenCXX/target-features-error.cpp @@ -77,7 +77,7 @@ //expected-error@+1{{'~CtorAndDTor' requires target feature 'avx512f'}} c2.~CtorAndDTor(); } - // FIXME: These need to be given a line number, however theres no good way + // FIXME: These need to be given a line number, however there's no good way // to get to the SourceLocation of anything by the time we're doing CodeGen // cleanups. //expected-error@*{{'~CtorAndDTor' requires target feature 'avx512f'}} diff --git a/clang/test/SemaCXX/builtin-align-cxx.cpp b/clang/test/SemaCXX/builtin-align-cxx.cpp --- a/clang/test/SemaCXX/builtin-align-cxx.cpp +++ b/clang/test/SemaCXX/builtin-align-cxx.cpp @@ -200,7 +200,7 @@ static_assert(__builtin_align_down(&align32array[7], 4) == &align32array[4], ""); static_assert(__builtin_align_down(&align32array[8], 4) == &align32array[8], ""); -// Achiving the same thing using casts to uintptr_t is not allowed: +// Achieving the same thing using casts to uintptr_t is not allowed: static_assert((char *)((__UINTPTR_TYPE__)&align32array[7] & ~3) == &align32array[4], ""); // expected-error{{not an integral constant expression}} static_assert(__builtin_align_down(&align32array[1], 4) == &align32array[0], ""); diff --git a/clang/test/SemaOpenCL/usm-address-spaces-conversions.cl b/clang/test/SemaOpenCL/usm-address-spaces-conversions.cl --- a/clang/test/SemaOpenCL/usm-address-spaces-conversions.cl +++ b/clang/test/SemaOpenCL/usm-address-spaces-conversions.cl @@ -7,7 +7,7 @@ * spaces: global_device and global_host that are a subset of __global address * space. 
As ISO/IEC TR 18037 5.1.3 declares - it's possible to implicitly * convert a subset address space to a superset address space, while conversion - * in a reversed direction could be achived only with an explicit cast */ + * in a reversed direction could be achieved only with an explicit cast */ #ifdef GENERIC #define AS_COMP __generic diff --git a/clang/tools/clang-shlib/CMakeLists.txt b/clang/tools/clang-shlib/CMakeLists.txt --- a/clang/tools/clang-shlib/CMakeLists.txt +++ b/clang/tools/clang-shlib/CMakeLists.txt @@ -15,7 +15,7 @@ endif() if (BUILD_SHARED_LIBS) # If we are building static libraries, then we don't need to add the static - # libraries as a depedency, because we are already linking against the + # libraries as a dependency, because we are already linking against the # individual object files. list(APPEND _DEPS $) endif() diff --git a/clang/tools/include-mapping/gen_std.py b/clang/tools/include-mapping/gen_std.py --- a/clang/tools/include-mapping/gen_std.py +++ b/clang/tools/include-mapping/gen_std.py @@ -8,7 +8,7 @@ #===------------------------------------------------------------------------===# """gen_std.py is a tool to generate a lookup table (from qualified names to -include headers) for C/C++ Standard Library symbols by parsing archieved HTML +include headers) for C/C++ Standard Library symbols by parsing archived HTML files from cppreference. The generated files are located in clang/include/Tooling/Inclusions. diff --git a/clang/tools/scan-build-py/lib/libear/ear.c b/clang/tools/scan-build-py/lib/libear/ear.c --- a/clang/tools/scan-build-py/lib/libear/ear.c +++ b/clang/tools/scan-build-py/lib/libear/ear.c @@ -447,7 +447,7 @@ pthread_mutex_unlock(&mutex); } -/* update environment assure that chilren processes will copy the desired +/* update environment assure that children processes will copy the desired * behaviour */ static int bear_capture_env_t(bear_env_t *env) { @@ -602,4 +602,4 @@ free((void *)*it); } free((void *)in); -} \ No newline at end of file +} diff --git a/clang/unittests/AST/StructuralEquivalenceTest.cpp b/clang/unittests/AST/StructuralEquivalenceTest.cpp --- a/clang/unittests/AST/StructuralEquivalenceTest.cpp +++ b/clang/unittests/AST/StructuralEquivalenceTest.cpp @@ -186,7 +186,7 @@ } // This test is disabled for now. -// FIXME Whether this is equivalent is dependendant on the target. +// FIXME Whether this is equivalent is dependent on the target. TEST_F(StructuralEquivalenceTest, DISABLED_CharVsSignedChar) { auto Decls = makeNamedDecls("char foo;", "signed char foo;", Lang_CXX03); EXPECT_FALSE(testStructuralMatch(Decls)); diff --git a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp --- a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp +++ b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp @@ -655,7 +655,7 @@ CallFunctionF)); } - // Depedent calls don't match. + // Dependent calls don't match. EXPECT_TRUE( notMatches("void f(int); template void g(T t) { f(t); }", CallFunctionF)); diff --git a/clang/unittests/Tooling/SourceCodeTest.cpp b/clang/unittests/Tooling/SourceCodeTest.cpp --- a/clang/unittests/Tooling/SourceCodeTest.cpp +++ b/clang/unittests/Tooling/SourceCodeTest.cpp @@ -255,7 +255,7 @@ Visitor.runOverAnnotated(R"cpp( #define ATTR __attribute__((deprecated("message"))) $r[[ATTR - // Commment. + // Comment. int x;]])cpp"); } @@ -410,7 +410,7 @@ Visit(R"cpp( #define ATTR __attribute__((deprecated("message"))) $r[[ATTR - // Commment. + // Comment. 
int x;]])cpp"); } diff --git a/clang/utils/TableGen/NeonEmitter.cpp b/clang/utils/TableGen/NeonEmitter.cpp --- a/clang/utils/TableGen/NeonEmitter.cpp +++ b/clang/utils/TableGen/NeonEmitter.cpp @@ -441,7 +441,7 @@ /// Return the index that parameter PIndex will sit at /// in a generated function call. This is often just PIndex, /// but may not be as things such as multiple-vector operands - /// and sret parameters need to be taken into accont. + /// and sret parameters need to be taken into account. unsigned getGeneratedParamIdx(unsigned PIndex) { unsigned Idx = 0; if (getReturnType().getNumVectors() > 1) diff --git a/clang/utils/analyzer/SATest.py b/clang/utils/analyzer/SATest.py --- a/clang/utils/analyzer/SATest.py +++ b/clang/utils/analyzer/SATest.py @@ -321,7 +321,7 @@ dock_parser.add_argument("--clang-dir", action="store", default="", help="Path to find/install LLVM installation.") dock_parser.add_argument("rest", nargs=argparse.REMAINDER, default=[], - help="Additionall args that will be forwarded " + help="Additional args that will be forwarded " "to the docker's entrypoint.") dock_parser.set_defaults(func=docker) diff --git a/clang/utils/analyzer/exploded-graph-rewriter.py b/clang/utils/analyzer/exploded-graph-rewriter.py --- a/clang/utils/analyzer/exploded-graph-rewriter.py +++ b/clang/utils/analyzer/exploded-graph-rewriter.py @@ -388,7 +388,7 @@ # Also on Windows macros __FILE__ produces specific delimiters `\` # and a directory or file may starts with the letter `l`. # Find all `\l` (like `,\l`, `}\l`, `[\l`) except `\\l`, - # because the literal as a rule containes multiple `\` before `\l`. + # because the literal as a rule contains multiple `\` before `\l`. node_label = re.sub(r'(?
  • scan-build: scan-build is the high-level command line utility for running the analyzer
  •
- • scan-view: scan-view a companion comannd line
+
  • scan-view: scan-view a companion command line utility to scan-build, scan-view is used to view analysis results generated by scan-build. There is an option that one can pass to scan-build to cause scan-view to diff --git a/compiler-rt/cmake/Modules/AddCompilerRT.cmake b/compiler-rt/cmake/Modules/AddCompilerRT.cmake --- a/compiler-rt/cmake/Modules/AddCompilerRT.cmake +++ b/compiler-rt/cmake/Modules/AddCompilerRT.cmake @@ -521,7 +521,7 @@ # when linking, not the compiler. Here, we hack it to use the compiler # because we want to use -fsanitize flags. - # Only add CMAKE_EXE_LINKER_FLAGS when in a standalone bulid. + # Only add CMAKE_EXE_LINKER_FLAGS when in a standalone build. # Or else CMAKE_EXE_LINKER_FLAGS contains flags for build compiler of Clang/llvm. # This might not be the same as what the COMPILER_RT_TEST_COMPILER supports. # eg: the build compiler use lld linker and we build clang with default ld linker diff --git a/compiler-rt/cmake/Modules/CompilerRTCompile.cmake b/compiler-rt/cmake/Modules/CompilerRTCompile.cmake --- a/compiler-rt/cmake/Modules/CompilerRTCompile.cmake +++ b/compiler-rt/cmake/Modules/CompilerRTCompile.cmake @@ -57,7 +57,7 @@ endfunction() # Compile a source into an object file with COMPILER_RT_TEST_COMPILER using -# a provided compile flags and dependenices. +# a provided compile flags and dependencies. # clang_compile( # CFLAGS # DEPS ) diff --git a/compiler-rt/lib/builtins/arm/adddf3vfp.S b/compiler-rt/lib/builtins/arm/adddf3vfp.S --- a/compiler-rt/lib/builtins/arm/adddf3vfp.S +++ b/compiler-rt/lib/builtins/arm/adddf3vfp.S @@ -11,7 +11,7 @@ // double __adddf3vfp(double a, double b) { return a + b; } // // Adds two double precision floating point numbers using the Darwin -// calling convention where double arguments are passsed in GPR pairs +// calling convention where double arguments are passed in GPR pairs .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/addsf3vfp.S b/compiler-rt/lib/builtins/arm/addsf3vfp.S --- a/compiler-rt/lib/builtins/arm/addsf3vfp.S +++ b/compiler-rt/lib/builtins/arm/addsf3vfp.S @@ -12,7 +12,7 @@ // extern float __addsf3vfp(float a, float b); // // Adds two single precision floating point numbers using the Darwin -// calling convention where single arguments are passsed in GPRs +// calling convention where single arguments are passed in GPRs // .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/divdf3vfp.S b/compiler-rt/lib/builtins/arm/divdf3vfp.S --- a/compiler-rt/lib/builtins/arm/divdf3vfp.S +++ b/compiler-rt/lib/builtins/arm/divdf3vfp.S @@ -12,7 +12,7 @@ // extern double __divdf3vfp(double a, double b); // // Divides two double precision floating point numbers using the Darwin -// calling convention where double arguments are passsed in GPR pairs +// calling convention where double arguments are passed in GPR pairs // .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/divsf3vfp.S b/compiler-rt/lib/builtins/arm/divsf3vfp.S --- a/compiler-rt/lib/builtins/arm/divsf3vfp.S +++ b/compiler-rt/lib/builtins/arm/divsf3vfp.S @@ -12,7 +12,7 @@ // extern float __divsf3vfp(float a, float b); // // Divides two single precision floating point numbers using the Darwin -// calling convention where single arguments are passsed like 32-bit ints. +// calling convention where single arguments are passed like 32-bit ints. 
// .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/eqdf2vfp.S b/compiler-rt/lib/builtins/arm/eqdf2vfp.S --- a/compiler-rt/lib/builtins/arm/eqdf2vfp.S +++ b/compiler-rt/lib/builtins/arm/eqdf2vfp.S @@ -11,7 +11,7 @@ // extern int __eqdf2vfp(double a, double b); // // Returns one iff a == b and neither is NaN. -// Uses Darwin calling convention where double precision arguments are passsed +// Uses Darwin calling convention where double precision arguments are passed // like in GPR pairs. .syntax unified diff --git a/compiler-rt/lib/builtins/arm/eqsf2vfp.S b/compiler-rt/lib/builtins/arm/eqsf2vfp.S --- a/compiler-rt/lib/builtins/arm/eqsf2vfp.S +++ b/compiler-rt/lib/builtins/arm/eqsf2vfp.S @@ -12,7 +12,7 @@ // extern int __eqsf2vfp(float a, float b); // // Returns one iff a == b and neither is NaN. -// Uses Darwin calling convention where single precision arguments are passsed +// Uses Darwin calling convention where single precision arguments are passed // like 32-bit ints // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/gedf2vfp.S b/compiler-rt/lib/builtins/arm/gedf2vfp.S --- a/compiler-rt/lib/builtins/arm/gedf2vfp.S +++ b/compiler-rt/lib/builtins/arm/gedf2vfp.S @@ -12,7 +12,7 @@ // extern int __gedf2vfp(double a, double b); // // Returns one iff a >= b and neither is NaN. -// Uses Darwin calling convention where double precision arguments are passsed +// Uses Darwin calling convention where double precision arguments are passed // like in GPR pairs. // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/gesf2vfp.S b/compiler-rt/lib/builtins/arm/gesf2vfp.S --- a/compiler-rt/lib/builtins/arm/gesf2vfp.S +++ b/compiler-rt/lib/builtins/arm/gesf2vfp.S @@ -12,7 +12,7 @@ // extern int __gesf2vfp(float a, float b); // // Returns one iff a >= b and neither is NaN. -// Uses Darwin calling convention where single precision arguments are passsed +// Uses Darwin calling convention where single precision arguments are passed // like 32-bit ints // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/gtdf2vfp.S b/compiler-rt/lib/builtins/arm/gtdf2vfp.S --- a/compiler-rt/lib/builtins/arm/gtdf2vfp.S +++ b/compiler-rt/lib/builtins/arm/gtdf2vfp.S @@ -12,7 +12,7 @@ // extern double __gtdf2vfp(double a, double b); // // Returns one iff a > b and neither is NaN. -// Uses Darwin calling convention where double precision arguments are passsed +// Uses Darwin calling convention where double precision arguments are passed // like in GPR pairs. // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/gtsf2vfp.S b/compiler-rt/lib/builtins/arm/gtsf2vfp.S --- a/compiler-rt/lib/builtins/arm/gtsf2vfp.S +++ b/compiler-rt/lib/builtins/arm/gtsf2vfp.S @@ -12,7 +12,7 @@ // extern int __gtsf2vfp(float a, float b); // // Returns one iff a > b and neither is NaN. -// Uses Darwin calling convention where single precision arguments are passsed +// Uses Darwin calling convention where single precision arguments are passed // like 32-bit ints // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/ledf2vfp.S b/compiler-rt/lib/builtins/arm/ledf2vfp.S --- a/compiler-rt/lib/builtins/arm/ledf2vfp.S +++ b/compiler-rt/lib/builtins/arm/ledf2vfp.S @@ -12,7 +12,7 @@ // extern double __ledf2vfp(double a, double b); // // Returns one iff a <= b and neither is NaN. -// Uses Darwin calling convention where double precision arguments are passsed +// Uses Darwin calling convention where double precision arguments are passed // like in GPR pairs. 
// .syntax unified diff --git a/compiler-rt/lib/builtins/arm/lesf2vfp.S b/compiler-rt/lib/builtins/arm/lesf2vfp.S --- a/compiler-rt/lib/builtins/arm/lesf2vfp.S +++ b/compiler-rt/lib/builtins/arm/lesf2vfp.S @@ -12,7 +12,7 @@ // extern int __lesf2vfp(float a, float b); // // Returns one iff a <= b and neither is NaN. -// Uses Darwin calling convention where single precision arguments are passsed +// Uses Darwin calling convention where single precision arguments are passed // like 32-bit ints // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/ltdf2vfp.S b/compiler-rt/lib/builtins/arm/ltdf2vfp.S --- a/compiler-rt/lib/builtins/arm/ltdf2vfp.S +++ b/compiler-rt/lib/builtins/arm/ltdf2vfp.S @@ -12,7 +12,7 @@ // extern double __ltdf2vfp(double a, double b); // // Returns one iff a < b and neither is NaN. -// Uses Darwin calling convention where double precision arguments are passsed +// Uses Darwin calling convention where double precision arguments are passed // like in GPR pairs. // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/ltsf2vfp.S b/compiler-rt/lib/builtins/arm/ltsf2vfp.S --- a/compiler-rt/lib/builtins/arm/ltsf2vfp.S +++ b/compiler-rt/lib/builtins/arm/ltsf2vfp.S @@ -12,7 +12,7 @@ // extern int __ltsf2vfp(float a, float b); // // Returns one iff a < b and neither is NaN. -// Uses Darwin calling convention where single precision arguments are passsed +// Uses Darwin calling convention where single precision arguments are passed // like 32-bit ints // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/muldf3vfp.S b/compiler-rt/lib/builtins/arm/muldf3vfp.S --- a/compiler-rt/lib/builtins/arm/muldf3vfp.S +++ b/compiler-rt/lib/builtins/arm/muldf3vfp.S @@ -12,7 +12,7 @@ // extern double __muldf3vfp(double a, double b); // // Multiplies two double precision floating point numbers using the Darwin -// calling convention where double arguments are passsed in GPR pairs +// calling convention where double arguments are passed in GPR pairs // .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/mulsf3vfp.S b/compiler-rt/lib/builtins/arm/mulsf3vfp.S --- a/compiler-rt/lib/builtins/arm/mulsf3vfp.S +++ b/compiler-rt/lib/builtins/arm/mulsf3vfp.S @@ -12,7 +12,7 @@ // extern float __mulsf3vfp(float a, float b); // // Multiplies two single precision floating point numbers using the Darwin -// calling convention where single arguments are passsed like 32-bit ints. +// calling convention where single arguments are passed like 32-bit ints. // .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/nedf2vfp.S b/compiler-rt/lib/builtins/arm/nedf2vfp.S --- a/compiler-rt/lib/builtins/arm/nedf2vfp.S +++ b/compiler-rt/lib/builtins/arm/nedf2vfp.S @@ -11,7 +11,7 @@ // extern double __nedf2vfp(double a, double b); // // Returns zero if a and b are unequal and neither is NaN. -// Uses Darwin calling convention where double precision arguments are passsed +// Uses Darwin calling convention where double precision arguments are passed // like in GPR pairs. .syntax unified diff --git a/compiler-rt/lib/builtins/arm/negdf2vfp.S b/compiler-rt/lib/builtins/arm/negdf2vfp.S --- a/compiler-rt/lib/builtins/arm/negdf2vfp.S +++ b/compiler-rt/lib/builtins/arm/negdf2vfp.S @@ -12,7 +12,7 @@ // extern double __negdf2vfp(double a, double b); // // Returns the negation a double precision floating point numbers using the -// Darwin calling convention where double arguments are passsed in GPR pairs. +// Darwin calling convention where double arguments are passed in GPR pairs. 
// .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/negsf2vfp.S b/compiler-rt/lib/builtins/arm/negsf2vfp.S --- a/compiler-rt/lib/builtins/arm/negsf2vfp.S +++ b/compiler-rt/lib/builtins/arm/negsf2vfp.S @@ -12,7 +12,7 @@ // extern float __negsf2vfp(float a); // // Returns the negation of a single precision floating point numbers using the -// Darwin calling convention where single arguments are passsed like 32-bit ints +// Darwin calling convention where single arguments are passed like 32-bit ints // .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/nesf2vfp.S b/compiler-rt/lib/builtins/arm/nesf2vfp.S --- a/compiler-rt/lib/builtins/arm/nesf2vfp.S +++ b/compiler-rt/lib/builtins/arm/nesf2vfp.S @@ -12,7 +12,7 @@ // extern int __nesf2vfp(float a, float b); // // Returns one iff a != b and neither is NaN. -// Uses Darwin calling convention where single precision arguments are passsed +// Uses Darwin calling convention where single precision arguments are passed // like 32-bit ints // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/subdf3vfp.S b/compiler-rt/lib/builtins/arm/subdf3vfp.S --- a/compiler-rt/lib/builtins/arm/subdf3vfp.S +++ b/compiler-rt/lib/builtins/arm/subdf3vfp.S @@ -12,7 +12,7 @@ // extern double __subdf3vfp(double a, double b); // // Returns difference between two double precision floating point numbers using -// the Darwin calling convention where double arguments are passsed in GPR pairs +// the Darwin calling convention where double arguments are passed in GPR pairs // .syntax unified .p2align 2 diff --git a/compiler-rt/lib/builtins/arm/subsf3vfp.S b/compiler-rt/lib/builtins/arm/subsf3vfp.S --- a/compiler-rt/lib/builtins/arm/subsf3vfp.S +++ b/compiler-rt/lib/builtins/arm/subsf3vfp.S @@ -12,7 +12,7 @@ // extern float __subsf3vfp(float a, float b); // // Returns the difference between two single precision floating point numbers -// using the Darwin calling convention where single arguments are passsed +// using the Darwin calling convention where single arguments are passed // like 32-bit ints. // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/unorddf2vfp.S b/compiler-rt/lib/builtins/arm/unorddf2vfp.S --- a/compiler-rt/lib/builtins/arm/unorddf2vfp.S +++ b/compiler-rt/lib/builtins/arm/unorddf2vfp.S @@ -12,7 +12,7 @@ // extern int __unorddf2vfp(double a, double b); // // Returns one iff a or b is NaN -// Uses Darwin calling convention where double precision arguments are passsed +// Uses Darwin calling convention where double precision arguments are passed // like in GPR pairs. // .syntax unified diff --git a/compiler-rt/lib/builtins/arm/unordsf2vfp.S b/compiler-rt/lib/builtins/arm/unordsf2vfp.S --- a/compiler-rt/lib/builtins/arm/unordsf2vfp.S +++ b/compiler-rt/lib/builtins/arm/unordsf2vfp.S @@ -12,7 +12,7 @@ // extern int __unordsf2vfp(float a, float b); // // Returns one iff a or b is NaN -// Uses Darwin calling convention where single precision arguments are passsed +// Uses Darwin calling convention where single precision arguments are passed // like 32-bit ints // .syntax unified diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h --- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h +++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h @@ -367,7 +367,7 @@ while (RareFeatures.size() > Entropic.NumberOfRarestFeatures && FreqOfMostAbundantRareFeature > Entropic.FeatureFrequencyThreshold) { - // Find most and second most abbundant feature. + // Find most and second most abundant feature. 
uint32_t MostAbundantRareFeatureIndices[2] = {RareFeatures[0], RareFeatures[0]}; size_t Delete = 0; diff --git a/compiler-rt/lib/profile/InstrProfilingFile.c b/compiler-rt/lib/profile/InstrProfilingFile.c --- a/compiler-rt/lib/profile/InstrProfilingFile.c +++ b/compiler-rt/lib/profile/InstrProfilingFile.c @@ -754,8 +754,9 @@ __llvm_profile_set_page_size(getpagesize()); __llvm_profile_enable_continuous_mode(); #else - PROF_WARN("%s", "Continous mode is currently only supported for Mach-O," - " ELF and COFF formats."); + PROF_WARN("%s", + "Continuous mode is currently only supported for Mach-O," + " ELF and COFF formats."); return -1; #endif } else { diff --git a/compiler-rt/lib/profile/InstrProfilingInternal.h b/compiler-rt/lib/profile/InstrProfilingInternal.h --- a/compiler-rt/lib/profile/InstrProfilingInternal.h +++ b/compiler-rt/lib/profile/InstrProfilingInternal.h @@ -187,7 +187,7 @@ COMPILER_RT_VISIBILITY extern uint8_t *DynamicBufferIOBuffer; COMPILER_RT_VISIBILITY extern uint32_t VPBufferSize; COMPILER_RT_VISIBILITY extern uint32_t VPMaxNumValsPerSite; -/* Pointer to the start of static value counters to be allocted. */ +/* Pointer to the start of static value counters to be allocated. */ COMPILER_RT_VISIBILITY extern ValueProfNode *CurrentVNode; COMPILER_RT_VISIBILITY extern ValueProfNode *EndVNode; extern void (*VPMergeHook)(struct ValueProfData *, __llvm_profile_data *); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h @@ -91,7 +91,7 @@ #elif defined(__sparc__) || defined(__mips__) return pc - 8; #elif SANITIZER_RISCV64 - // RV-64 has variable instruciton length... + // RV-64 has variable instruction length... // C extentions gives us 2-byte instructoins // RV-64 has 4-byte instructions // + RISCV architecture allows instructions up to 8 bytes diff --git a/compiler-rt/test/asan/TestCases/leaks.cpp b/compiler-rt/test/asan/TestCases/leaks.cpp --- a/compiler-rt/test/asan/TestCases/leaks.cpp +++ b/compiler-rt/test/asan/TestCases/leaks.cpp @@ -15,7 +15,7 @@ __attribute__((noopt)) void leak(int n) { #if defined(__ANDROID__) || defined(__BIONIC__) - // Bionic does not acutally allocate when n==0, hence + // Bionic does not actually allocate when n==0, hence // there would not be a leak. // Re-adjust n so the test can pass. if (n == 0) diff --git a/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c b/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c --- a/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c +++ b/compiler-rt/test/hwasan/TestCases/use-after-free-and-overflow.c @@ -46,7 +46,7 @@ } __hwasan_tag_memory(Untag(one), 3, 16); __hwasan_tag_memory(Untag(other), 3, 16); - // Tag potential adjaceant allocations with a mismatching tag, otherwise this + // Tag potential adjacent allocations with a mismatching tag, otherwise this // test would flake. __hwasan_tag_memory(Untag(one) + 16, 4, 16); __hwasan_tag_memory(Untag(one) - 16, 4, 16); diff --git a/compiler-rt/test/orc/TestCases/Darwin/arm64/trivial-tlv.S b/compiler-rt/test/orc/TestCases/Darwin/arm64/trivial-tlv.S --- a/compiler-rt/test/orc/TestCases/Darwin/arm64/trivial-tlv.S +++ b/compiler-rt/test/orc/TestCases/Darwin/arm64/trivial-tlv.S @@ -1,7 +1,7 @@ // Test that basic MachO TLVs work by adding together TLVs with values // 0, 1, and -1, and returning the result (0 for success). 
This setup // tests both zero-initialized (__thread_bss) and non-zero-initialized -// (__thread_data) secitons. +// (__thread_data) sections. // // RUN: %clang -c -o %t %s // RUN: %llvm_jitlink %t diff --git a/compiler-rt/test/orc/TestCases/Darwin/x86-64/trivial-tlv.S b/compiler-rt/test/orc/TestCases/Darwin/x86-64/trivial-tlv.S --- a/compiler-rt/test/orc/TestCases/Darwin/x86-64/trivial-tlv.S +++ b/compiler-rt/test/orc/TestCases/Darwin/x86-64/trivial-tlv.S @@ -4,7 +4,7 @@ // Test that basic MachO TLVs work by adding together TLVs with values // 0, 1, and -1, and returning the result (0 for success). This setup // tests both zero-initialized (__thread_bss) and non-zero-initialized -// (__thread_data) secitons. +// (__thread_data) sections. .section __TEXT,__text,regular,pure_instructions .build_version macos, 11, 0 diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py --- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py +++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareAddress.py @@ -4,7 +4,7 @@ # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Commmand sets the path for all following commands to 'declared_file'. +"""Command sets the path for all following commands to 'declared_file'. """ import os diff --git a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py --- a/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py +++ b/cross-project-tests/debuginfo-tests/dexter/dex/command/commands/DexDeclareFile.py @@ -4,7 +4,7 @@ # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -"""Commmand sets the path for all following commands to 'declared_file'. +"""Command sets the path for all following commands to 'declared_file'. """ from pathlib import PurePath diff --git a/flang/docs/Extensions.md b/flang/docs/Extensions.md --- a/flang/docs/Extensions.md +++ b/flang/docs/Extensions.md @@ -140,7 +140,7 @@ for the default kind of INTEGER are assumed to have the least larger kind that can hold them, if one exists. * BOZ literals can be used as INTEGER values in contexts where the type is - unambiguous: the right hand sides of assigments and initializations + unambiguous: the right hand sides of assignments and initializations of INTEGER entities, as actual arguments to a few intrinsic functions (ACHAR, BTEST, CHAR), and as actual arguments of references to procedures with explicit interfaces whose corresponding dummy @@ -154,7 +154,7 @@ * EQUIVALENCE of numeric and character sequences (a ubiquitous extension), as well as of sequences of non-default kinds of numeric types with each other. -* Values for whole anonymous parent components in structure constructors +* Vues for whole anonymous parent components in structure constructors (e.g., `EXTENDEDTYPE(PARENTTYPE(1,2,3))` rather than `EXTENDEDTYPE(1,2,3)` or `EXTENDEDTYPE(PARENTTYPE=PARENTTYPE(1,2,3))`). 
* Some intrinsic functions are specified in the standard as requiring the @@ -287,7 +287,7 @@ * Use of INTEGER data with the intrinsic logical operators `.NOT.`, `.AND.`, `.OR.`, and `.XOR.`. * IF (integer expression) THEN ... END IF (PGI/Intel) -* Comparsion of LOGICAL with ==/.EQ. rather than .EQV. (also .NEQV.) (PGI/Intel) +* Comparison of LOGICAL with ==/.EQ. rather than .EQV. (also .NEQV.) (PGI/Intel) * Procedure pointers in COMMON blocks (PGI/Intel) * Underindexing multi-dimensional arrays (e.g., A(1) rather than A(1,1)) (PGI only) * Legacy PGI `NCHARACTER` type and `NC` Kanji character literals diff --git a/flang/docs/FIRArrayOperations.md b/flang/docs/FIRArrayOperations.md --- a/flang/docs/FIRArrayOperations.md +++ b/flang/docs/FIRArrayOperations.md @@ -115,7 +115,7 @@ This operation taken with `array_load`'s captures Fortran's copy-in/copy-out semantics. The first operands of `array_merge_store` is the result of the initial `array_load` operation. While this value could be -retrieved by reference chasiing through the different array operations it is +retrieved by reference chasing through the different array operations it is useful to have it on hand directly for analysis passes since this directly defines the "bounds" of the Fortran statement represented by these operations. The intention is to allow copy-in/copy-out regions to be easily delineated, diff --git a/flang/examples/FlangOmpReport/yaml_summarizer.py b/flang/examples/FlangOmpReport/yaml_summarizer.py --- a/flang/examples/FlangOmpReport/yaml_summarizer.py +++ b/flang/examples/FlangOmpReport/yaml_summarizer.py @@ -44,7 +44,7 @@ $ python3 yaml_summarizer.py file_1.yaml file_2.yaml - Construcsts are in the form: + Constructs are in the form: - construct: someOMPconstruct count: 8 clauses: diff --git a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp --- a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp +++ b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp @@ -434,7 +434,7 @@ return mlir::success(); } - /// If the signature does not need any special target-specific converions, + /// If the signature does not need any special target-specific conversions, /// then it is considered portable for any target, and this function will /// return `true`. Otherwise, the signature is not portable and `false` is /// returned. diff --git a/flang/lib/Semantics/check-nullify.cpp b/flang/lib/Semantics/check-nullify.cpp --- a/flang/lib/Semantics/check-nullify.cpp +++ b/flang/lib/Semantics/check-nullify.cpp @@ -60,7 +60,7 @@ // A pointer-object shall not depend on the value, // bounds, or association status of another pointer- // object in the same NULLIFY statement. - // This restriction is the programmer's responsibilty. + // This restriction is the programmer's responsibility. // Some dependencies can be found compile time or at // runtime, but for now we choose to skip such checks. } diff --git a/flang/test/Driver/default-backend-pipelines.f90 b/flang/test/Driver/default-backend-pipelines.f90 --- a/flang/test/Driver/default-backend-pipelines.f90 +++ b/flang/test/Driver/default-backend-pipelines.f90 @@ -1,4 +1,4 @@ -! Verify that`-O{n}` is indeed taken into account when definining the LLVM backend pass pipeline. +! Verify that`-O{n}` is indeed taken into account when defining the LLVM backend pass pipeline. ! 
REQUIRES: aarch64-registered-target diff --git a/flang/test/Semantics/altreturn06.f90 b/flang/test/Semantics/altreturn06.f90 --- a/flang/test/Semantics/altreturn06.f90 +++ b/flang/test/Semantics/altreturn06.f90 @@ -1,5 +1,5 @@ ! RUN: %python %S/test_errors.py %s %flang_fc1 -! Test alternat return argument passing for internal and external subprograms +! Test alternate return argument passing for internal and external subprograms ! Both of the following are OK call extSubprogram (*100) call intSubprogram (*100) diff --git a/flang/test/Semantics/call27.f90 b/flang/test/Semantics/call27.f90 --- a/flang/test/Semantics/call27.f90 +++ b/flang/test/Semantics/call27.f90 @@ -1,5 +1,5 @@ ! RUN: %python %S/test_errors.py %s %flang_fc1 -! Catch NULL() actual argement association with allocatable dummy argument +! Catch NULL() actual argument association with allocatable dummy argument program test !ERROR: Null actual argument 'NULL()' may not be associated with allocatable dummy argument 'a=' call foo1(null()) diff --git a/libc/benchmarks/automemcpy/unittests/ResultAnalyzerTest.cpp b/libc/benchmarks/automemcpy/unittests/ResultAnalyzerTest.cpp --- a/libc/benchmarks/automemcpy/unittests/ResultAnalyzerTest.cpp +++ b/libc/benchmarks/automemcpy/unittests/ResultAnalyzerTest.cpp @@ -25,7 +25,7 @@ static constexpr SampleId Id = {Foo1, DistA}; static constexpr Sample kSamples[] = { Sample{Id, SampleType::ITERATION, 4}, - Sample{Id, SampleType::AGGREGATE, -1}, // Aggegates gets discarded + Sample{Id, SampleType::AGGREGATE, -1}, // Aggregates gets discarded }; const std::vector Data = getThroughputs(kSamples); diff --git a/libc/cmake/modules/LLVMLibCObjectRules.cmake b/libc/cmake/modules/LLVMLibCObjectRules.cmake --- a/libc/cmake/modules/LLVMLibCObjectRules.cmake +++ b/libc/cmake/modules/LLVMLibCObjectRules.cmake @@ -383,7 +383,7 @@ # compiler and the compiled clang-tidy. if(COMPILER_RESOURCE_DIR) # We run restrict-system-libc-headers with --system-headers to prevent - # transitive inclusion through compler provided headers. + # transitive inclusion through compiler provided headers. set(restrict_system_headers_check_invocation COMMAND ${LLVM_LIBC_CLANG_TIDY} --system-headers --checks="-*,llvmlibc-restrict-system-libc-headers" diff --git a/libc/config/linux/app.h b/libc/config/linux/app.h --- a/libc/config/linux/app.h +++ b/libc/config/linux/app.h @@ -87,7 +87,7 @@ uintptr_t addr = 0; // The value the thread pointer register should be initialized to. - // Note that, dependending the target architecture ABI, it can be the + // Note that, depending on the target architecture ABI, it can be the // same as |addr| or something else. 
uintptr_t tp = 0; diff --git a/libc/test/src/__support/uint128_test.cpp b/libc/test/src/__support/uint128_test.cpp --- a/libc/test/src/__support/uint128_test.cpp +++ b/libc/test/src/__support/uint128_test.cpp @@ -44,7 +44,7 @@ EXPECT_EQ((val1 * val2), result1); EXPECT_EQ((val1 * val2), (val2 * val1)); // multiplication is reciprocal - // Check that the multiplication works accross the whole number + // Check that the multiplication works across the whole number LL_UInt128 val3({0xf, 0}); LL_UInt128 val4({0x1111111111111111, 0x1111111111111111}); LL_UInt128 result2({0xffffffffffffffff, 0xffffffffffffffff}); diff --git a/libc/test/src/stdio/fileop_test.cpp b/libc/test/src/stdio/fileop_test.cpp --- a/libc/test/src/stdio/fileop_test.cpp +++ b/libc/test/src/stdio/fileop_test.cpp @@ -1,4 +1,4 @@ -//===-- Unittests for file operations like fopen, flcose etc --------------===// +//===-- Unittests for file operations like fopen, fclose etc --------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/libc/test/src/stdio/unlocked_fileop_test.cpp b/libc/test/src/stdio/unlocked_fileop_test.cpp --- a/libc/test/src/stdio/unlocked_fileop_test.cpp +++ b/libc/test/src/stdio/unlocked_fileop_test.cpp @@ -1,4 +1,4 @@ -//===-- Unittests for f operations like fopen, flcose etc --------------===// +//===-- Unittests for f operations like fopen, fclose etc --------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/libcxx/src/support/runtime/stdexcept_vcruntime.ipp b/libcxx/src/support/runtime/stdexcept_vcruntime.ipp --- a/libcxx/src/support/runtime/stdexcept_vcruntime.ipp +++ b/libcxx/src/support/runtime/stdexcept_vcruntime.ipp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// #ifndef _LIBCPP_ABI_VCRUNTIME -#error This file may only be used when defering to vcruntime +#error This file may only be used when deferring to vcruntime #endif namespace std { diff --git a/libunwind/docs/conf.py b/libunwind/docs/conf.py --- a/libunwind/docs/conf.py +++ b/libunwind/docs/conf.py @@ -242,7 +242,7 @@ #texinfo_show_urls = 'footnote' -# FIXME: Define intersphinx configration. +# FIXME: Define intersphinx configuration. intersphinx_mapping = {} diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp --- a/lld/ELF/InputSection.cpp +++ b/lld/ELF/InputSection.cpp @@ -920,7 +920,7 @@ // at runtime, the notion of PC-relative doesn't make sense here. So, // this is a usage error. However, GNU linkers historically accept such // relocations without any errors and relocate them as if they were at - // address 0. For bug-compatibilty, we accept them with warnings. We + // address 0. For bug-compatibility, we accept them with warnings. We // know Steel Bank Common Lisp as of 2018 have this bug. 
warn(msg); target.relocateNoSym( diff --git a/lld/MachO/Options.td b/lld/MachO/Options.td --- a/lld/MachO/Options.td +++ b/lld/MachO/Options.td @@ -323,7 +323,7 @@ Flags<[HelpHidden]>, Group; def no_deduplicate : Flag<["-"], "no_deduplicate">, - HelpText<"Disable code deduplicaiton (synonym for `--icf=none')">, + HelpText<"Disable code deduplication (synonym for `--icf=none')">, Alias, AliasArgs<["none"]>, Group; diff --git a/lld/test/ELF/common-archive-lookup.s b/lld/test/ELF/common-archive-lookup.s --- a/lld/test/ELF/common-archive-lookup.s +++ b/lld/test/ELF/common-archive-lookup.s @@ -91,7 +91,7 @@ # NFC-NEXT: Other: 0 # NFC-NEXT: Section: .bss -## Expecting the strong definition from the object file, and the defintions from +## Expecting the strong definition from the object file, and the definitions from ## the archive do not interfere. # TEST2-LABEL: Disassembly of section .data: # TEST2: : diff --git a/lld/test/ELF/init-fini.s b/lld/test/ELF/init-fini.s --- a/lld/test/ELF/init-fini.s +++ b/lld/test/ELF/init-fini.s @@ -47,7 +47,7 @@ // NOENTRY: ] // Should not add entries for "_init" and "_fini" to the symbol table -// if the symbols are defined in non-fetched achive members. +// if the symbols are defined in non-fetched archive members. // RUN: rm -f %t.a // RUN: llvm-ar rcs %t.a %t // RUN: ld.lld -shared -m elf_x86_64 -e _unknown %t.a -o %t.so diff --git a/lld/test/ELF/lto/duplicated-name.ll b/lld/test/ELF/lto/duplicated-name.ll --- a/lld/test/ELF/lto/duplicated-name.ll +++ b/lld/test/ELF/lto/duplicated-name.ll @@ -1,5 +1,5 @@ ; REQUIRES: x86 -; Cretae two archive with the same member name +; Create two archive with the same member name ; RUN: rm -f %t1.a %t2.a ; RUN: opt -module-summary %s -o %t.o ; RUN: llvm-ar rcS %t1.a %t.o diff --git a/lld/test/MachO/tools/generate-cfi-funcs.py b/lld/test/MachO/tools/generate-cfi-funcs.py --- a/lld/test/MachO/tools/generate-cfi-funcs.py +++ b/lld/test/MachO/tools/generate-cfi-funcs.py @@ -67,7 +67,7 @@ return func_size def random_seed(): - """Generate a seed that can easily be passsed back in via --seed=STRING""" + """Generate a seed that can easily be passed back in via --seed=STRING""" return ''.join(random.choice(string.ascii_lowercase) for i in range(10)) def main(): diff --git a/lld/test/MachO/tools/generate-thunkable-program.py b/lld/test/MachO/tools/generate-thunkable-program.py --- a/lld/test/MachO/tools/generate-thunkable-program.py +++ b/lld/test/MachO/tools/generate-thunkable-program.py @@ -362,7 +362,7 @@ print_here_tail() def random_seed(): - """Generate a seed that can easily be passsed back in via --seed=STRING""" + """Generate a seed that can easily be passed back in via --seed=STRING""" return ''.join(random.choice(string.ascii_lowercase) for i in range(10)) def generate_sizes(base, megabytes): diff --git a/lld/test/wasm/debuginfo-undefined-global.s b/lld/test/wasm/debuginfo-undefined-global.s --- a/lld/test/wasm/debuginfo-undefined-global.s +++ b/lld/test/wasm/debuginfo-undefined-global.s @@ -3,7 +3,7 @@ # RUN: obj2yaml %t.wasm | FileCheck %s # Debug sections are allowed to contains references to non-live symbols that -# then get GC'd. In this test the .debug_info seciton contains a reference to +# then get GC'd. In this test the .debug_info section contains a reference to # foo which is not otherwise used and will not be marked a live in the output. # Verify the tombstone value is written to debug_info section. 
diff --git a/lldb/bindings/interface/SBProcess.i b/lldb/bindings/interface/SBProcess.i --- a/lldb/bindings/interface/SBProcess.i +++ b/lldb/bindings/interface/SBProcess.i @@ -422,7 +422,7 @@ %feature("autodoc", " Allocates a block of memory within the process, with size and - access permissions specified in the arguments. The permisssions + access permissions specified in the arguments. The permissions argument is an or-combination of zero or more of lldb.ePermissionsWritable, lldb.ePermissionsReadable, and lldb.ePermissionsExecutable. Returns the address diff --git a/lldb/bindings/interface/SBType.i b/lldb/bindings/interface/SBType.i --- a/lldb/bindings/interface/SBType.i +++ b/lldb/bindings/interface/SBType.i @@ -537,8 +537,8 @@ "Returns the `BasicType` value that is most appropriate to this type. Returns `eBasicTypeInvalid` if no appropriate `BasicType` was found or this - type is invalid. See the `BasicType` documentation for the language-specific m - aning of each `BasicType` value. + type is invalid. See the `BasicType` documentation for the language-specific + meaning of each `BasicType` value. **Overload behaviour:** When called with a `BasicType` parameter, the following behaviour applies: @@ -731,8 +731,8 @@ * C: Always returns ``0``. * C++: If this type is a class template instantiation then this returns the - number of template parameters that were used in this instantiation. This i - cludes both explicit and implicit template parameters. + number of template parameters that were used in this instantiation. This + includes both explicit and implicit template parameters. * Objective-C: Always returns ``0``. ") GetNumberOfTemplateArguments; uint32_t diff --git a/lldb/docs/use/python-reference.rst b/lldb/docs/use/python-reference.rst --- a/lldb/docs/use/python-reference.rst +++ b/lldb/docs/use/python-reference.rst @@ -608,7 +608,7 @@ # Finally, dispose of the debugger you just made. lldb.SBDebugger.Destroy(debugger) - # Terminate the debug sesssion + # Terminate the debug session lldb.SBDebugger.Terminate() diff --git a/lldb/examples/python/armv7_cortex_m_target_defintion.py b/lldb/examples/python/armv7_cortex_m_target_defintion.py --- a/lldb/examples/python/armv7_cortex_m_target_defintion.py +++ b/lldb/examples/python/armv7_cortex_m_target_defintion.py @@ -31,7 +31,7 @@ # # USAGE # -# (lldb) settings set plugin.process.gdb-remote.target-definition-file /path/to/armv7_cortex_m_target_defintion.py +# (lldb) settings set plugin.process.gdb-remote.target-definition-file /path/to/armv7_cortex_m_target_definition.py # (lldb) gdb-remote other.baz.com:1234 # # The target definition file will get used if and only if the diff --git a/lldb/examples/python/lldb_module_utils.py b/lldb/examples/python/lldb_module_utils.py --- a/lldb/examples/python/lldb_module_utils.py +++ b/lldb/examples/python/lldb_module_utils.py @@ -10,7 +10,7 @@ class DumpLineTables: command_name = "dump-line-tables" - short_decription = "Dumps full paths to compile unit files and optionally all line table files." + short_description = "Dumps full paths to compile unit files and optionally all line table files." description = 'Dumps all line tables from all compile units for any modules specified as arguments. Specifying the --verbose flag will output address ranges for each line entry.' 
usage = "usage: %prog [options] MODULE1 [MODULE2 ...]" def create_options(self): @@ -28,7 +28,7 @@ default=False) def get_short_help(self): - return self.short_decription + return self.short_description def get_long_help(self): return self.help_string diff --git a/lldb/include/lldb/Core/Debugger.h b/lldb/include/lldb/Core/Debugger.h --- a/lldb/include/lldb/Core/Debugger.h +++ b/lldb/include/lldb/Core/Debugger.h @@ -404,7 +404,7 @@ /// If a pointer is passed to a std::once_flag, then it will be used to /// ensure the given warning is only broadcast once. static void - ReportWarning(std::string messsage, + ReportWarning(std::string message, llvm::Optional debugger_id = llvm::None, std::once_flag *once = nullptr); @@ -426,7 +426,7 @@ /// If a pointer is passed to a std::once_flag, then it will be used to /// ensure the given error is only broadcast once. static void - ReportError(std::string messsage, + ReportError(std::string message, llvm::Optional debugger_id = llvm::None, std::once_flag *once = nullptr); diff --git a/lldb/include/lldb/Core/Mangled.h b/lldb/include/lldb/Core/Mangled.h --- a/lldb/include/lldb/Core/Mangled.h +++ b/lldb/include/lldb/Core/Mangled.h @@ -26,7 +26,7 @@ /// /// Designed to handle mangled names. The demangled version of any names will /// be computed when the demangled name is accessed through the Demangled() -/// acccessor. This class can also tokenize the demangled version of the name +/// accessor. This class can also tokenize the demangled version of the name /// for powerful searches. Functions and symbols could make instances of this /// class for their mangled names. Uniqued string pools are used for the /// mangled, demangled, and token string values to allow for faster diff --git a/lldb/include/lldb/Symbol/Type.h b/lldb/include/lldb/Symbol/Type.h --- a/lldb/include/lldb/Symbol/Type.h +++ b/lldb/include/lldb/Symbol/Type.h @@ -123,7 +123,7 @@ /// GetModule may return module for compile unit's object file. /// GetExeModule returns module for executable object file that contains - /// compile unit where type was actualy defined. + /// compile unit where type was actually defined. /// GetModule and GetExeModule may return the same value. lldb::ModuleSP GetExeModule(); diff --git a/lldb/packages/Python/lldbsuite/test/lldbbench.py b/lldb/packages/Python/lldbsuite/test/lldbbench.py --- a/lldb/packages/Python/lldbsuite/test/lldbbench.py +++ b/lldb/packages/Python/lldbsuite/test/lldbbench.py @@ -12,7 +12,7 @@ class Stopwatch(object): """Stopwatch provides a simple utility to start/stop your stopwatch multiple times. Each start/stop is equal to a lap, with its elapsed time accumulated - while measurment is in progress. + while measurement is in progress. When you're ready to start from scratch for another round of measurements, be sure to call the reset() method. diff --git a/lldb/source/Core/DynamicLoader.cpp b/lldb/source/Core/DynamicLoader.cpp --- a/lldb/source/Core/DynamicLoader.cpp +++ b/lldb/source/Core/DynamicLoader.cpp @@ -58,7 +58,7 @@ DynamicLoader::DynamicLoader(Process *process) : m_process(process) {} -// Accessosors to the global setting as to whether to stop at image (shared +// Accessors to the global setting as to whether to stop at image (shared // library) loading/unloading. 
bool DynamicLoader::GetStopWhenImagesChange() const { diff --git a/lldb/source/Core/IOHandlerCursesGUI.cpp b/lldb/source/Core/IOHandlerCursesGUI.cpp --- a/lldb/source/Core/IOHandlerCursesGUI.cpp +++ b/lldb/source/Core/IOHandlerCursesGUI.cpp @@ -3112,11 +3112,11 @@ static constexpr const char *kLoadDependentFilesExecOnly = "Executable only"; std::vector GetLoadDependentFilesChoices() { - std::vector load_depentents_options; - load_depentents_options.push_back(kLoadDependentFilesExecOnly); - load_depentents_options.push_back(kLoadDependentFilesYes); - load_depentents_options.push_back(kLoadDependentFilesNo); - return load_depentents_options; + std::vector load_dependents_options; + load_dependents_options.push_back(kLoadDependentFilesExecOnly); + load_dependents_options.push_back(kLoadDependentFilesYes); + load_dependents_options.push_back(kLoadDependentFilesNo); + return load_dependents_options; } LoadDependentFiles GetLoadDependentFiles() { diff --git a/lldb/source/Expression/DWARFExpression.cpp b/lldb/source/Expression/DWARFExpression.cpp --- a/lldb/source/Expression/DWARFExpression.cpp +++ b/lldb/source/Expression/DWARFExpression.cpp @@ -471,7 +471,7 @@ // by a file address on the stack. We assume that DW_OP_const4u or // DW_OP_const8u is used for these values, and we check that the last // opcode we got before either of these was DW_OP_const4u or - // DW_OP_const8u. If so, then we can link the value accodingly. For + // DW_OP_const8u. If so, then we can link the value accordingly. For // Darwin, the value in the DW_OP_const4u or DW_OP_const8u is the file // address of a structure that contains a function pointer, the pthread // key and the offset into the data pointed to by the pthread key. So we @@ -735,7 +735,7 @@ Value *value = nullptr) { // Note that this function is conflating DWARF expressions with // DWARF location descriptions. Perhaps it would be better to define - // a wrapper for DWARFExpresssion::Eval() that deals with DWARF + // a wrapper for DWARFExpression::Eval() that deals with DWARF // location descriptions (which consist of one or more DWARF // expressions). But doing this would mean we'd also need factor the // handling of DW_OP_(bit_)piece out of this function. @@ -773,7 +773,7 @@ /// \param dw_op_type C-style string used to vary the error output /// \param file_addr the file address we are trying to resolve and turn into a /// load address -/// \param so_addr out parameter, will be set to load addresss or section offset +/// \param so_addr out parameter, will be set to load address or section offset /// \param check_sectionoffset bool which determines if having a section offset /// but not a load address is considerd a success /// \returns llvm::Optional containing the load address if resolving and getting diff --git a/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp b/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp --- a/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp +++ b/lldb/source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp @@ -592,7 +592,7 @@ // (e.g. com.example.myapplication) instead of the main process binary // (/system/bin/app_process(32)). The logic is not sound in general (it // assumes base_addr is the real address, even though it actually is a load - // bias), but it happens to work on adroid because app_process has a file + // bias), but it happens to work on android because app_process has a file // address of zero. 
// This should be removed after we drop support for android-23. if (m_process->GetTarget().GetArchitecture().GetTriple().isAndroid()) { diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp --- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp +++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp @@ -1915,7 +1915,7 @@ // We failed to copy the type we found LLDB_LOG(log, " Failed to import the function type '{0}' ({1:x})" - " into the expression parser AST contenxt", + " into the expression parser AST context", function_type->GetName(), function_type->GetID()); return; diff --git a/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp b/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp --- a/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp +++ b/lldb/source/Plugins/Platform/gdb-server/PlatformRemoteGDBServer.cpp @@ -803,7 +803,7 @@ for (size_t i = 0; i < connection_urls.size(); ++i) { ConnectProcess(connection_urls[i].c_str(), "gdb-remote", debugger, nullptr, error); if (error.Fail()) - return i; // We already connected to i process succsessfully + return i; // We already connected to i process successfully } return connection_urls.size(); } diff --git a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp --- a/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp +++ b/lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp @@ -996,8 +996,8 @@ process_arch.GetTriple().getTriple()); } - if (int addresssable_bits = m_gdb_comm.GetAddressingBits()) { - lldb::addr_t address_mask = ~((1ULL << addresssable_bits) - 1); + if (int addressable_bits = m_gdb_comm.GetAddressingBits()) { + lldb::addr_t address_mask = ~((1ULL << addressable_bits) - 1); SetCodeAddressMask(address_mask); SetDataAddressMask(address_mask); } diff --git a/lldb/source/Plugins/Process/minidump/MinidumpParser.cpp b/lldb/source/Plugins/Process/minidump/MinidumpParser.cpp --- a/lldb/source/Plugins/Process/minidump/MinidumpParser.cpp +++ b/lldb/source/Plugins/Process/minidump/MinidumpParser.cpp @@ -351,7 +351,7 @@ // Create memory regions from the linux maps only. We do this to avoid issues // with breakpad generated minidumps where if someone has mmap'ed a shared - // library into memory to accesss its data in the object file, we can get a + // library into memory to access its data in the object file, we can get a // minidump with two mappings for a binary: one whose base image points to a // memory region that is read + execute and one that is read only. 
MemoryRegionInfos linux_regions; diff --git a/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp b/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp --- a/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp +++ b/lldb/source/Plugins/Process/scripted/ScriptedThread.cpp @@ -163,7 +163,7 @@ llvm::Twine( "StackFrame array size (" + llvm::Twine(arr_size) + llvm::Twine( - ") is greater than maximum autorized for a StackFrameList.")) + ") is greater than maximum authorized for a StackFrameList.")) .str(), error, LLDBLog::Thread); diff --git a/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp b/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp --- a/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp +++ b/lldb/source/Plugins/SymbolFile/Breakpad/SymbolFileBreakpad.cpp @@ -294,7 +294,7 @@ ParseInlineOriginRecords(); // A vector of current each level's parent block. For example, when parsing // "INLINE 0 ...", the current level is 0 and its parent block is the - // funciton block at index 0. + // function block at index 0. std::vector blocks; Block &block = func.GetBlock(false); block.AddRange(Block::Range(0, func.GetAddressRange().GetByteSize())); diff --git a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp --- a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp +++ b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp @@ -686,7 +686,7 @@ if (TypeSystemClang::StartTagDeclarationDefinition(element_ast_type)) { TypeSystemClang::CompleteTagDeclarationDefinition(element_ast_type); } else { - // We are not able to start defintion. + // We are not able to start definition. return nullptr; } } diff --git a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp --- a/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp +++ b/lldb/source/Plugins/SymbolFile/PDB/SymbolFilePDB.cpp @@ -1940,7 +1940,7 @@ if (!func_decorated_name.empty()) { mangled.SetMangledName(ConstString(func_decorated_name)); - // For MSVC, format of C funciton's decorated name depends on calling + // For MSVC, format of C function's decorated name depends on calling // convention. Unfortunately none of the format is recognized by current // LLDB. For example, `_purecall` is a __cdecl C function. From PDB, // `__purecall` is retrieved as both its decorated and undecorated name diff --git a/lldb/source/Symbol/Type.cpp b/lldb/source/Symbol/Type.cpp --- a/lldb/source/Symbol/Type.cpp +++ b/lldb/source/Symbol/Type.cpp @@ -184,7 +184,7 @@ } } - // Call the get byte size accesor so we resolve our byte size + // Call the get byte size accessor so we resolve our byte size if (GetByteSize(exe_scope)) s->Printf(", byte-size = %" PRIu64, m_byte_size); bool show_fullpaths = (level == lldb::eDescriptionLevelVerbose); diff --git a/lldb/test/API/commands/expression/codegen-crash-import-def-arraytype-element/main.cpp b/lldb/test/API/commands/expression/codegen-crash-import-def-arraytype-element/main.cpp --- a/lldb/test/API/commands/expression/codegen-crash-import-def-arraytype-element/main.cpp +++ b/lldb/test/API/commands/expression/codegen-crash-import-def-arraytype-element/main.cpp @@ -11,7 +11,7 @@ struct B { // When we import the all the FieldDecl we need to check if we have an // ArrayType and then check if the ElementType is a RecordDecl and if so - // import the defintion. Otherwise during codegen we will attempt to layout A + // import the definition. 
Otherwise during codegen we will attempt to layout A // but won't be able to. A s1[2]; A s2[2][2][3]; diff --git a/lldb/test/API/functionalities/breakpoint/debugbreak/TestDebugBreak.py b/lldb/test/API/functionalities/breakpoint/debugbreak/TestDebugBreak.py --- a/lldb/test/API/functionalities/breakpoint/debugbreak/TestDebugBreak.py +++ b/lldb/test/API/functionalities/breakpoint/debugbreak/TestDebugBreak.py @@ -32,7 +32,7 @@ thread, "Unable to find thread stopped at the __debugbreak()") frame = thread.GetFrameAtIndex(0) - # We should be in funciton 'bar'. + # We should be in function 'bar'. self.assertTrue(frame.IsValid()) function_name = frame.GetFunctionName() self.assertIn('bar', function_name, diff --git a/lldb/test/API/functionalities/load_unload/TestLoadUnload.py b/lldb/test/API/functionalities/load_unload/TestLoadUnload.py --- a/lldb/test/API/functionalities/load_unload/TestLoadUnload.py +++ b/lldb/test/API/functionalities/load_unload/TestLoadUnload.py @@ -325,7 +325,7 @@ # The breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno = 1, expected_hit_count = 1) - # Issue the 'continue' command. We should stop agaian at a_function. + # Issue the 'continue' command. We should stop again at a_function. # The stop reason of the thread should be breakpoint and at a_function. self.runCmd("continue") diff --git a/lldb/test/API/functionalities/memory/tag/TestMemoryTag.py b/lldb/test/API/functionalities/memory/tag/TestMemoryTag.py --- a/lldb/test/API/functionalities/memory/tag/TestMemoryTag.py +++ b/lldb/test/API/functionalities/memory/tag/TestMemoryTag.py @@ -29,7 +29,7 @@ self.runCmd("run", RUN_SUCCEEDED) # If you're on AArch64 you could have MTE but the remote process - # must also support it. If you're on any other arhcitecture you + # must also support it. If you're on any other architecture you # won't have any tagging at all. So the error message is different. if self.isAArch64(): expected = "error: Process does not support memory tagging" diff --git a/lldb/test/API/functionalities/module_cache/bsd/TestModuleCacheBSD.py b/lldb/test/API/functionalities/module_cache/bsd/TestModuleCacheBSD.py --- a/lldb/test/API/functionalities/module_cache/bsd/TestModuleCacheBSD.py +++ b/lldb/test/API/functionalities/module_cache/bsd/TestModuleCacheBSD.py @@ -77,7 +77,7 @@ """ exe = self.getBuildArtifact("a.out") - # Create a module with no depedencies. + # Create a module with no dependencies. target = self.createTestTarget(load_dependent_modules=False) self.runCmd('breakpoint set -f a.c -l %d' % (self.line_a)) diff --git a/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py b/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py --- a/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py +++ b/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py @@ -44,7 +44,7 @@ """ exe = self.getBuildArtifact("a.out") - # Create a module with no depedencies. + # Create a module with no dependencies. 
target = self.createTestTarget(load_dependent_modules=False) # Get the executable module and get the number of symbols to make diff --git a/lldb/test/API/functionalities/module_cache/universal/TestModuleCacheUniversal.py b/lldb/test/API/functionalities/module_cache/universal/TestModuleCacheUniversal.py --- a/lldb/test/API/functionalities/module_cache/universal/TestModuleCacheUniversal.py +++ b/lldb/test/API/functionalities/module_cache/universal/TestModuleCacheUniversal.py @@ -43,7 +43,7 @@ exe = self.getBuildArtifact(exe_basename) self.yaml2obj(yaml_path, exe) self.assertTrue(os.path.exists(exe)) - # Create a module with no depedencies. + # Create a module with no dependencies. self.runCmd('target create -d --arch x86_64 %s' % (exe)) self.runCmd('image dump symtab %s' % (exe_basename)) self.runCmd('target create -d --arch arm64 %s' % (exe)) diff --git a/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py b/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py --- a/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py +++ b/lldb/test/API/functionalities/postmortem/elf-core/TestLinuxCore.py @@ -469,7 +469,7 @@ @skipIfLLVMTargetMissing("AArch64") def test_aarch64_pac_regs(self): - # Test AArch64/Linux Pointer Authenication register read + # Test AArch64/Linux Pointer Authentication register read target = self.dbg.CreateTarget(None) self.assertTrue(target, VALID_TARGET) process = target.LoadCore("linux-aarch64-pac.core") diff --git a/lldb/test/API/functionalities/postmortem/minidump-new/makefile.txt b/lldb/test/API/functionalities/postmortem/minidump-new/makefile.txt --- a/lldb/test/API/functionalities/postmortem/minidump-new/makefile.txt +++ b/lldb/test/API/functionalities/postmortem/minidump-new/makefile.txt @@ -3,7 +3,7 @@ # The binary should have debug symbols because stack unwinding doesn't work # correctly using the information in the Minidump only. Also we want to evaluate # local variables, etc. -# Breakpad compiles as a static library, so statically linking againts it +# Breakpad compiles as a static library, so statically linking against it # makes the binary huge. # Dynamically linking to it does improve things, but we are still #include-ing # breakpad headers (which is a lot of source code for which we generate debug diff --git a/lldb/tools/debugserver/source/DNBTimer.h b/lldb/tools/debugserver/source/DNBTimer.h --- a/lldb/tools/debugserver/source/DNBTimer.h +++ b/lldb/tools/debugserver/source/DNBTimer.h @@ -53,7 +53,7 @@ PTHREAD_MUTEX_LOCKER(locker, m_mutexAP.get()); gettimeofday(&m_timeval, NULL); } - // Get the total mircoseconds since Jan 1, 1970 + // Get the total microseconds since Jan 1, 1970 uint64_t TotalMicroSeconds() const { PTHREAD_MUTEX_LOCKER(locker, m_mutexAP.get()); return (uint64_t)(m_timeval.tv_sec) * 1000000ull + diff --git a/lldb/tools/debugserver/source/JSON.h b/lldb/tools/debugserver/source/JSON.h --- a/lldb/tools/debugserver/source/JSON.h +++ b/lldb/tools/debugserver/source/JSON.h @@ -71,7 +71,7 @@ public: typedef std::shared_ptr SP; - // We cretae a constructor for all integer and floating point type with using + // We create a constructor for all integer and floating point type with using // templates and // SFINAE to avoid having ambiguous overloads because of the implicit type // promotion. 
If we diff --git a/lldb/tools/debugserver/source/MacOSX/Genealogy.cpp b/lldb/tools/debugserver/source/MacOSX/Genealogy.cpp --- a/lldb/tools/debugserver/source/MacOSX/Genealogy.cpp +++ b/lldb/tools/debugserver/source/MacOSX/Genealogy.cpp @@ -129,7 +129,7 @@ return true; }); - // Collect all the Activites + // Collect all the Activities m_os_activity_iterate_activities( process_info->activities, process_info, ^bool(os_activity_entry_t activity) { diff --git a/lldb/unittests/Utility/UUIDTest.cpp b/lldb/unittests/Utility/UUIDTest.cpp --- a/lldb/unittests/Utility/UUIDTest.cpp +++ b/lldb/unittests/Utility/UUIDTest.cpp @@ -83,7 +83,7 @@ EXPECT_FALSE(u.SetFromStringRef("4")); } -TEST(UUIDTest, StringConverion) { +TEST(UUIDTest, StringConversion) { EXPECT_EQ("40414243", UUID::fromData("@ABC", 4).GetAsString()); EXPECT_EQ("40414243-4445-4647", UUID::fromData("@ABCDEFG", 8).GetAsString()); EXPECT_EQ("40414243-4445-4647-4849-4A4B", diff --git a/llvm/cmake/config.guess b/llvm/cmake/config.guess --- a/llvm/cmake/config.guess +++ b/llvm/cmake/config.guess @@ -4,7 +4,7 @@ # 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011 Free Software Foundation, Inc. -timestamp='2011-08-20' +timestamp='2022-07-29' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -1098,7 +1098,7 @@ # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that + # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; diff --git a/llvm/cmake/modules/AddLLVM.cmake b/llvm/cmake/modules/AddLLVM.cmake --- a/llvm/cmake/modules/AddLLVM.cmake +++ b/llvm/cmake/modules/AddLLVM.cmake @@ -506,7 +506,7 @@ add_dependencies(${obj_name} ${ARG_DEPENDS}) endif() # Treat link libraries like PUBLIC dependencies. LINK_LIBS might - # result in generating header files. Add a dependendency so that + # result in generating header files. Add a dependency so that # the generated header is created before this object library. if(ARG_LINK_LIBS) cmake_parse_arguments(LINK_LIBS_ARG @@ -782,7 +782,7 @@ # - LLVM_LINK_COMPONENTS: a list of component this component depends on # - COMPONENT_HAS_JIT: (only for group component) whether this target group # supports JIT compilation -# Additionnaly, the ADD_TO_COMPONENT option make it possible to add this +# Additionally, the ADD_TO_COMPONENT option make it possible to add this # component to the LLVM_LINK_COMPONENTS of . function(add_llvm_component_library name) cmake_parse_arguments(ARG diff --git a/llvm/cmake/modules/CoverageReport.cmake b/llvm/cmake/modules/CoverageReport.cmake --- a/llvm/cmake/modules/CoverageReport.cmake +++ b/llvm/cmake/modules/CoverageReport.cmake @@ -54,7 +54,7 @@ # This currently only works for LLVM, but could be expanded to work for all # sub-projects. The current limitation is based on not having a good way to -# automaticall plumb through the targets that we want to run coverage against. +# automatically plumb through the targets that we want to run coverage against. 
add_custom_target(generate-coverage-report COMMAND ${Python3_EXECUTABLE} ${PREPARE_CODE_COV_ARTIFACT} ${LLVM_PROFDATA} ${LLVM_COV} ${LLVM_PROFILE_DATA_DIR} diff --git a/llvm/docs/GlobalISel/GMIR.rst b/llvm/docs/GlobalISel/GMIR.rst --- a/llvm/docs/GlobalISel/GMIR.rst +++ b/llvm/docs/GlobalISel/GMIR.rst @@ -195,7 +195,7 @@ Pointer types are distinguished by address space. This matches IR, as opposed to SelectionDAG where address space is an attribute on operations. This representation better supports pointers having different sizes depending -on their addressspace. +on their address space. .. note:: diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -3990,7 +3990,7 @@ .. note:: - A '``poison``' value (decribed in the next section) should be used instead of + A '``poison``' value (described in the next section) should be used instead of '``undef``' whenever possible. Poison values are stronger than undef, and enable more optimizations. Just the existence of '``undef``' blocks certain optimizations (see the examples below). diff --git a/llvm/docs/MemorySSA.rst b/llvm/docs/MemorySSA.rst --- a/llvm/docs/MemorySSA.rst +++ b/llvm/docs/MemorySSA.rst @@ -470,10 +470,10 @@ results' precision provided by ``MemorySSA``. For example, AliasAnalysis has various caps, or restrictions on looking through phis which can affect what ``MemorySSA`` can infer. Changes made by different passes may make MemorySSA either "overly -optimized" (it can provide a more acccurate result than if it were recomputed +optimized" (it can provide a more accurate result than if it were recomputed from scratch), or "under optimized" (it could infer more if it were recomputed). This can lead to challenges to reproduced results in isolation with a single pass -when the result relies on the state aquired by ``MemorySSA`` due to being updated by +when the result relies on the state acquired by ``MemorySSA`` due to being updated by multiple subsequent passes. Passes that use and update ``MemorySSA`` should do so through the APIs provided by the ``MemorySSAUpdater``, or through calls on the Walker. diff --git a/llvm/include/llvm-c/Object.h b/llvm/include/llvm-c/Object.h --- a/llvm/include/llvm-c/Object.h +++ b/llvm/include/llvm-c/Object.h @@ -64,7 +64,7 @@ * appropriate implementation selected. The context may be NULL except if * the resulting file is an LLVM IR file. * - * The memory buffer is not consumed by this function. It is the responsibilty + * The memory buffer is not consumed by this function. It is the responsibility * of the caller to free it with \c LLVMDisposeMemoryBuffer. * * If NULL is returned, the \p ErrorMessage parameter is populated with the @@ -80,7 +80,7 @@ /** * Dispose of a binary file. * - * The binary file does not own its backing buffer. It is the responsibilty + * The binary file does not own its backing buffer. It is the responsibility * of the caller to free it with \c LLVMDisposeMemoryBuffer. */ void LLVMDisposeBinary(LLVMBinaryRef BR); diff --git a/llvm/include/llvm-c/Orc.h b/llvm/include/llvm-c/Orc.h --- a/llvm/include/llvm-c/Orc.h +++ b/llvm/include/llvm-c/Orc.h @@ -839,7 +839,7 @@ /** * Notify all not-yet-emitted covered by this MaterializationResponsibility * instance that an error has occurred. 
- * This will remove all symbols covered by this MaterializationResponsibilty + * This will remove all symbols covered by this MaterializationResponsibility * from the target JITDylib, and send an error to any queries waiting on * these symbols. */ diff --git a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h --- a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h +++ b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This pass is used to evaluate branch probabilties. +// This pass is used to evaluate branch probabilities. // //===----------------------------------------------------------------------===// diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h --- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h +++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h @@ -103,7 +103,7 @@ Unsafe, }; - /// Dependece between memory access instructions. + /// Dependence between memory access instructions. struct Dependence { /// The type of the dependence. enum DepType { @@ -504,7 +504,7 @@ private: /// Groups pointers such that a single memcheck is required /// between two different groups. This will clear the CheckingGroups vector - /// and re-compute it. We will only group dependecies if \p UseDependencies + /// and re-compute it. We will only group dependencies if \p UseDependencies /// is true, otherwise we will create a separate group for each pointer. void groupChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies); diff --git a/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/llvm/include/llvm/Analysis/LoopAnalysisManager.h --- a/llvm/include/llvm/Analysis/LoopAnalysisManager.h +++ b/llvm/include/llvm/Analysis/LoopAnalysisManager.h @@ -90,7 +90,7 @@ Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI), MSSAUsed(Arg.MSSAUsed) { // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state. Arg.InnerAM = nullptr; } @@ -99,7 +99,7 @@ LI = RHS.LI; MSSAUsed = RHS.MSSAUsed; // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state. RHS.InnerAM = nullptr; return *this; diff --git a/llvm/include/llvm/Analysis/LoopCacheAnalysis.h b/llvm/include/llvm/Analysis/LoopCacheAnalysis.h --- a/llvm/include/llvm/Analysis/LoopCacheAnalysis.h +++ b/llvm/include/llvm/Analysis/LoopCacheAnalysis.h @@ -166,7 +166,7 @@ /// /// Intuitively a reference group represents memory references that access /// the same cache line. Conditions 1,2 above account for temporal reuse, while -/// contition 3 accounts for spacial reuse. +/// condition 3 accounts for spacial reuse. using ReferenceGroupTy = SmallVector, 8>; using ReferenceGroupsTy = SmallVector; diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h --- a/llvm/include/llvm/Analysis/MLModelRunner.h +++ b/llvm/include/llvm/Analysis/MLModelRunner.h @@ -18,7 +18,7 @@ /// MLModelRunner interface: abstraction of a mechanism for evaluating a /// tensorflow "saved model". 
-/// NOTE: feature indices are expected to be consistent all accross +/// NOTE: feature indices are expected to be consistent all across /// MLModelRunners (pertaining to the same model), and also Loggers (see /// TFUtils.h) class MLModelRunner { diff --git a/llvm/include/llvm/Analysis/RegionInfo.h b/llvm/include/llvm/Analysis/RegionInfo.h --- a/llvm/include/llvm/Analysis/RegionInfo.h +++ b/llvm/include/llvm/Analysis/RegionInfo.h @@ -547,7 +547,7 @@ /// /// After calling this function the BasicBlock RegionNodes will be stored at /// different memory locations. RegionNodes obtained before this function is - /// called are therefore not comparable to RegionNodes abtained afterwords. + /// called are therefore not comparable to RegionNodes obtained afterwards. void clearNodeCache(); /// @name Subregion Iterators diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -470,7 +470,7 @@ /// Relative lookup table entries consist of 32-bit offsets. /// Do not generate relative lookup tables for large code models - /// in 64-bit achitectures where 32-bit offsets might not be enough. + /// in 64-bit architectures where 32-bit offsets might not be enough. if (TM.getCodeModel() == CodeModel::Medium || TM.getCodeModel() == CodeModel::Large) return false; diff --git a/llvm/include/llvm/CodeGen/DFAPacketizer.h b/llvm/include/llvm/CodeGen/DFAPacketizer.h --- a/llvm/include/llvm/CodeGen/DFAPacketizer.h +++ b/llvm/include/llvm/CodeGen/DFAPacketizer.h @@ -178,7 +178,7 @@ return false; } - // Check if it is legal to prune dependece between SUI and SUJ. + // Check if it is legal to prune dependence between SUI and SUJ. virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) { return false; } diff --git a/llvm/include/llvm/CodeGen/LiveRangeCalc.h b/llvm/include/llvm/CodeGen/LiveRangeCalc.h --- a/llvm/include/llvm/CodeGen/LiveRangeCalc.h +++ b/llvm/include/llvm/CodeGen/LiveRangeCalc.h @@ -86,7 +86,7 @@ /// 2. LiveOut[MBB].second.getNode() == MBB /// The live-out value is defined in MBB. /// 3. forall P in preds(MBB): LiveOut[P] == LiveOut[MBB] - /// The live-out value passses through MBB. All predecessors must carry + /// The live-out value passes through MBB. All predecessors must carry /// the same value. /// /// The domtree node may be null, it can be computed. diff --git a/llvm/include/llvm/CodeGen/MIRPrinter.h b/llvm/include/llvm/CodeGen/MIRPrinter.h --- a/llvm/include/llvm/CodeGen/MIRPrinter.h +++ b/llvm/include/llvm/CodeGen/MIRPrinter.h @@ -34,7 +34,7 @@ /// you the correct list of successor blocks in most cases except for things /// like jump tables where the basic block references can't easily be found. /// The MIRPRinter will skip printing successors if they match the result of -/// this funciton and the parser will use this function to construct a list if +/// this function and the parser will use this function to construct a list if /// it is missing. void guessSuccessors(const MachineBasicBlock &MBB, SmallVectorImpl &Result, diff --git a/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h --- a/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h +++ b/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This pass is used to evaluate branch probabilties on machine basic blocks.
+// This pass is used to evaluate branch probabilities on machine basic blocks. // //===----------------------------------------------------------------------===// diff --git a/llvm/include/llvm/CodeGen/RegisterBankInfo.h b/llvm/include/llvm/CodeGen/RegisterBankInfo.h --- a/llvm/include/llvm/CodeGen/RegisterBankInfo.h +++ b/llvm/include/llvm/CodeGen/RegisterBankInfo.h @@ -77,7 +77,7 @@ void print(raw_ostream &OS) const; /// Check that the Mask is compatible with the RegBank. - /// Indeed, if the RegBank cannot accomadate the "active bits" of the mask, + /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask, /// there is no way this mapping is valid. /// /// \note This method does not check anything when assertions are disabled. diff --git a/llvm/include/llvm/CodeGen/RegisterPressure.h b/llvm/include/llvm/CodeGen/RegisterPressure.h --- a/llvm/include/llvm/CodeGen/RegisterPressure.h +++ b/llvm/include/llvm/CodeGen/RegisterPressure.h @@ -367,7 +367,7 @@ /// Track the max pressure within the region traversed so far. RegisterPressure &P; - /// Run in two modes dependending on whether constructed with IntervalPressure + /// Run in two modes depending on whether constructed with IntervalPressure /// or RegisterPressure. If requireIntervals is false, LIS are ignored. bool RequireIntervals; diff --git a/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h b/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h --- a/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h +++ b/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h @@ -181,9 +181,9 @@ struct FixedSizeInfo { /// The fixed byte size for fixed size forms. uint16_t NumBytes = 0; - /// Number of DW_FORM_address forms in this abbrevation declaration. + /// Number of DW_FORM_address forms in this abbreviation declaration. uint8_t NumAddrs = 0; - /// Number of DW_FORM_ref_addr forms in this abbrevation declaration. + /// Number of DW_FORM_ref_addr forms in this abbreviation declaration. uint8_t NumRefAddrs = 0; /// Number of 4 byte in DWARF32 and 8 byte in DWARF64 forms. uint8_t NumDwarfOffsets = 0; diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/llvm/include/llvm/ExecutionEngine/Orc/Core.h --- a/llvm/include/llvm/ExecutionEngine/Orc/Core.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/Core.h @@ -613,7 +613,7 @@ /// Notify all not-yet-emitted covered by this MaterializationResponsibility /// instance that an error has occurred. - /// This will remove all symbols covered by this MaterializationResponsibilty + /// This will remove all symbols covered by this MaterializationResponsibility /// from the target JITDylib, and send an error to any queries waiting on /// these symbols. void failMaterialization(); diff --git a/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h b/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h --- a/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h @@ -60,7 +60,7 @@ raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap::value_type &KV); -/// Render a SymbolDependendeMap. +/// Render a SymbolDependenceMap. raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps); /// Render a MaterializationUnit.
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h b/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h --- a/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h @@ -27,7 +27,7 @@ public: /// Create from a ExecutorProcessControl instance alone. This will use /// the EPC's lookupSymbols method to find the registration/deregistration - /// funciton addresses by name. + /// function addresses by name. static Expected> Create(ExecutionSession &ES); diff --git a/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h --- a/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h +++ b/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h @@ -144,7 +144,7 @@ virtual bool needsToReserveAllocationSpace() { return false; } /// Override to return false to tell LLVM no stub space will be needed. - /// This requires some guarantees depending on architecuture, but when + /// This requires some guarantees depending on architecture, but when /// you know what you are doing it saves allocated space. virtual bool allowStubAllocation() const { return true; } diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h --- a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h @@ -62,7 +62,7 @@ #include "llvm/Frontend/OpenMP/OMPKinds.def" /// IDs for all omp runtime library ident_t flag encodings (see -/// their defintion in openmp/runtime/src/kmp.h). +/// their definition in openmp/runtime/src/kmp.h). enum class IdentFlag { #define OMP_IDENT_FLAG(Enum, Str, Value) Enum = Value, #include "llvm/Frontend/OpenMP/OMPKinds.def" diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPContext.h b/llvm/include/llvm/Frontend/OpenMP/OMPContext.h --- a/llvm/include/llvm/Frontend/OpenMP/OMPContext.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPContext.h @@ -115,7 +115,7 @@ /// Variant match information describes the required traits and how they are /// scored (via the ScoresMap). In addition, the required consturct nesting is -/// decribed as well. +/// described as well. struct VariantMatchInfo { /// Add the trait \p Property to the required trait set. \p RawString is the /// string we parsed and derived \p Property from. If \p Score is not null, it diff --git a/llvm/include/llvm/IR/GlobalValue.h b/llvm/include/llvm/IR/GlobalValue.h --- a/llvm/include/llvm/IR/GlobalValue.h +++ b/llvm/include/llvm/IR/GlobalValue.h @@ -472,7 +472,7 @@ return !mayBeDerefined(); } - /// Return true if this global has an exact defintion. + /// Return true if this global has an exact definition. bool hasExactDefinition() const { // While this computes exactly the same thing as // isStrongDefinitionForLinker, the intended uses are different. This diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td --- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -235,7 +235,7 @@ // MASK = 0x0000 0020: VMEM read instructions may be scheduled across SCHED_BARRIER. // MASK = 0x0000 0040: VMEM write instructions may be scheduled across SCHED_BARRIER. // MASK = 0x0000 0080: ALL DS instructions may be scheduled across SCHED_BARRIER. -// MASK = 0x0000 0100: ALL DS read instructions may be scheduled accoss SCHED_BARRIER. +// MASK = 0x0000 0100: ALL DS read instructions may be scheduled across SCHED_BARRIER. 
// MASK = 0x0000 0200: ALL DS write instructions may be scheduled across SCHED_BARRIER. def int_amdgcn_sched_barrier : ClangBuiltin<"__builtin_amdgcn_sched_barrier">, Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, IntrHasSideEffects, IntrConvergent, diff --git a/llvm/include/llvm/IR/PassManager.h b/llvm/include/llvm/IR/PassManager.h --- a/llvm/include/llvm/IR/PassManager.h +++ b/llvm/include/llvm/IR/PassManager.h @@ -680,7 +680,7 @@ /// cyclic dependencies between analysis results. /// /// This returns true if the given analysis's result is invalid. Any - /// dependecies on it will become invalid as a result. + /// dependencies on it will become invalid as a result. template bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) { using ResultModelT = @@ -944,7 +944,7 @@ Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)) { // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state. Arg.InnerAM = nullptr; } @@ -962,7 +962,7 @@ Result &operator=(Result &&RHS) { InnerAM = RHS.InnerAM; // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state. RHS.InnerAM = nullptr; return *this; diff --git a/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h b/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h --- a/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h +++ b/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h @@ -106,7 +106,7 @@ * \defgroup Cache controlling options * * These entry points control the ThinLTO cache. The cache is intended to - * support incremental build, and thus needs to be persistent accross build. + * support incremental build, and thus needs to be persistent across build. * The client enabled the cache by supplying a path to an existing directory. * The code generator will use this to store objects files that may be reused * during a subsequent build. diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h --- a/llvm/include/llvm/MC/MCStreamer.h +++ b/llvm/include/llvm/MC/MCStreamer.h @@ -889,7 +889,7 @@ /// "foo.c"' assembler directive. virtual void emitFileDirective(StringRef Filename); - /// Emit ".file assembler diretive with additioal info. + /// Emit ".file assembler directive with additional info. virtual void emitFileDirective(StringRef Filename, StringRef CompilerVerion, StringRef TimeStamp, StringRef Description); diff --git a/llvm/include/llvm/Support/BranchProbability.h b/llvm/include/llvm/Support/BranchProbability.h --- a/llvm/include/llvm/Support/BranchProbability.h +++ b/llvm/include/llvm/Support/BranchProbability.h @@ -56,7 +56,7 @@ static BranchProbability getBranchProbability(uint64_t Numerator, uint64_t Denominator); - // Normalize given probabilties so that the sum of them becomes approximate + // Normalize given probabilities so that the sum of them becomes approximate // one. template static void normalizeProbabilities(ProbabilityIter Begin, diff --git a/llvm/include/llvm/Support/RWMutex.h b/llvm/include/llvm/Support/RWMutex.h --- a/llvm/include/llvm/Support/RWMutex.h +++ b/llvm/include/llvm/Support/RWMutex.h @@ -19,7 +19,7 @@ #include #include -// std::shared_timed_mutex is only availble on macOS 10.12 and later.
+// std::shared_timed_mutex is only available on macOS 10.12 and later. #if defined(__APPLE__) && defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) #if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101200 #define LLVM_USE_RW_MUTEX_IMPL diff --git a/llvm/include/llvm/Support/TypeName.h b/llvm/include/llvm/Support/TypeName.h --- a/llvm/include/llvm/Support/TypeName.h +++ b/llvm/include/llvm/Support/TypeName.h @@ -18,7 +18,7 @@ /// /// This routine may fail on some platforms or for particularly unusual types. /// Do not use it for anything other than logging and debugging aids. It isn't -/// portable or dependendable in any real sense. +/// portable or dependable in any real sense. /// /// The returned StringRef will point into a static storage duration string. /// However, it may not be null terminated and may be some strangely aligned diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h --- a/llvm/include/llvm/Transforms/IPO/Attributor.h +++ b/llvm/include/llvm/Transforms/IPO/Attributor.h @@ -2278,7 +2278,7 @@ /// The interface ensures that the assumed bits are always a subset of the known /// bits. Users can only add known bits and, except through adding known bits, /// they can only remove assumed bits. This should guarantee monotoniticy and -/// thereby the existence of a fixpoint (if used corretly). The fixpoint is +/// thereby the existence of a fixpoint (if used correctly). The fixpoint is /// reached when the assumed and known state/bits are equal. Users can /// force/inidicate a fixpoint. If an optimistic one is indicated, the known /// state will catch up with the assumed one, for a pessimistic fixpoint it is @@ -2536,7 +2536,7 @@ /// Set the assumed value to \p Value but never below the known one. void setAssumed(bool Value) { Assumed &= (Known | Value); } - /// Set the known and asssumed value to \p Value. + /// Set the known and assumed value to \p Value. void setKnown(bool Value) { Known |= Value; Assumed |= Value; @@ -3568,7 +3568,7 @@ /// } /// ``` /// In that case, AccessedBytesMap is `{0:4, 4:4, 8:4, 40:4}`. - /// AccessedBytesMap is std::map so it is iterated in accending order on + /// AccessedBytesMap is std::map so it is iterated in ascending order on /// key(Offset). So KnownBytes will be updated like this: /// /// |Access | KnownBytes diff --git a/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h b/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h --- a/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h +++ b/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h @@ -42,7 +42,7 @@ /// Computes function attributes in post-order over the call graph. /// /// By operating in post-order, this pass computes precise attributes for -/// called functions prior to processsing their callers. This "bottom-up" +/// called functions prior to processing their callers. This "bottom-up" /// approach allows powerful interprocedural inference of function attributes /// like memory access patterns, etc. 
It can discover functions that do not /// access memory, or only read memory, and give them the readnone/readonly diff --git a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h --- a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h +++ b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h @@ -417,7 +417,7 @@ /// The adaptor comes with two modes: the loop mode and the loop-nest mode, and /// the worklist updater lived inside will be in the same mode as the adaptor /// (refer to the documentation of \c LPMUpdater for more detailed explanation). -/// Specifically, in loop mode, all loops in the funciton will be pushed into +/// Specifically, in loop mode, all loops in the function will be pushed into /// the worklist and processed by \p Pass, while only top-level loops are /// processed in loop-nest mode. Please refer to the various specializations of /// \fn createLoopFunctionToLoopPassAdaptor to see when loop mode and loop-nest diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp --- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp +++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp @@ -1257,7 +1257,7 @@ return AliasResult::NoAlias; // Compute ranges of potentially accessed bytes for both accesses. If the - // interseciton is empty, there can be no overlap. + // intersection is empty, there can be no overlap. unsigned BW = OffsetRange.getBitWidth(); ConstantRange Range1 = OffsetRange.add( ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue()))); diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp --- a/llvm/lib/Analysis/DependenceAnalysis.cpp +++ b/llvm/lib/Analysis/DependenceAnalysis.cpp @@ -646,7 +646,7 @@ // Returns NoAlias/MayAliass/MustAlias for two memory locations based upon their // underlaying objects. If LocA and LocB are known to not alias (for any reason: -// tbaa, non-overlapping regions etc), then it is known there is no dependecy. +// tbaa, non-overlapping regions etc), then it is known there is no dependency. // Otherwise the underlying objects are checked to see if they point to // different identifiable objects. static AliasResult underlyingObjectsAlias(AAResults *AA, diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -2689,7 +2689,7 @@ default: return nullptr; - // Equality comaprisons are easy to fold. + // Equality comparisons are easy to fold. case CmpInst::ICMP_EQ: case CmpInst::ICMP_NE: break; diff --git a/llvm/lib/Analysis/LazyCallGraph.cpp b/llvm/lib/Analysis/LazyCallGraph.cpp --- a/llvm/lib/Analysis/LazyCallGraph.cpp +++ b/llvm/lib/Analysis/LazyCallGraph.cpp @@ -542,7 +542,7 @@ assert(SourceI > (SCCs.begin() + SourceIdx) && "Must have moved the source to fix the post-order."); assert(*std::prev(SourceI) == &TargetSCC && - "Last SCC to move should have bene the target."); + "Last SCC to move should have been the target."); // Return an empty range at the target SCC indicating there is nothing to // merge. diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp --- a/llvm/lib/Analysis/TFUtils.cpp +++ b/llvm/lib/Analysis/TFUtils.cpp @@ -153,7 +153,7 @@ private: /// The objects necessary for carrying out an evaluation of the SavedModel. 
- /// They are expensive to set up, and we maintain them accross all the + /// They are expensive to set up, and we maintain them across all the /// evaluations of the model. TF_Session *Session = nullptr; TFGraphPtr Graph; diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp --- a/llvm/lib/Analysis/VectorUtils.cpp +++ b/llvm/lib/Analysis/VectorUtils.cpp @@ -1372,7 +1372,7 @@ // that all the pointers in the group don't wrap. // So we check only group member 0 (which is always guaranteed to exist), // and group member Factor - 1; If the latter doesn't exist we rely on - // peeling (if it is a non-reversed accsess -- see Case 3). + // peeling (if it is a non-reversed access -- see Case 3). if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first"))) continue; if (Group->getMember(Group->getFactor() - 1)) diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1336,7 +1336,7 @@ OutStreamer->AddComment("number of basic blocks"); OutStreamer->emitULEB128IntValue(MF.size()); const MCSymbol *PrevMBBEndSymbol = FunctionSymbol; - // Emit BB Information for each basic block in the funciton. + // Emit BB Information for each basic block in the function. for (const MachineBasicBlock &MBB : MF) { const MCSymbol *MBBSymbol = MBB.isEntryBlock() ? FunctionSymbol : MBB.getSymbol(); diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -2152,7 +2152,7 @@ } /// Return ClassOptions that should be present on both the forward declaration -/// and the defintion of a tag type. +/// and the definition of a tag type. static ClassOptions getCommonClassOptions(const DICompositeType *Ty) { ClassOptions CO = ClassOptions::None; diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -1151,7 +1151,7 @@ M->debug_compile_units_end()); assert(NumDebugCUs > 0 && "Asm unexpectedly initialized"); assert(MMI->hasDebugInfo() && - "DebugInfoAvailabilty unexpectedly not initialized"); + "DebugInfoAvailability unexpectedly not initialized"); SingleCU = NumDebugCUs == 1; DenseMap> GVMap; @@ -1723,7 +1723,7 @@ for (auto &R : OpenRanges) Values.push_back(R.second); - // With Basic block sections, it is posssible that the StartLabel and the + // With Basic block sections, it is possible that the StartLabel and the // Instr are not in the same section. This happens when the StartLabel is // the function begin label and the dbg value appears in a basic block // that is not the entry. In this case, the range needs to be split to diff --git a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp --- a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp @@ -398,7 +398,7 @@ computeActionsTable(LandingPads, Actions, FirstActions); // Compute the call-site table and call-site ranges. Normally, there is only - // one call-site-range which covers the whole funciton. With + // one call-site-range which covers the whole function. With // -basic-block-sections, there is one call-site-range per basic block // section. 
SmallVector CallSites; diff --git a/llvm/lib/CodeGen/BasicBlockSections.cpp b/llvm/lib/CodeGen/BasicBlockSections.cpp --- a/llvm/lib/CodeGen/BasicBlockSections.cpp +++ b/llvm/lib/CodeGen/BasicBlockSections.cpp @@ -12,7 +12,7 @@ // -fbasic-block-sections= option is used. Further, with profile information // only the subset of basic blocks with profiles are placed in separate sections // and the rest are grouped in a cold section. The exception handling blocks are -// treated specially to ensure they are all in one seciton. +// treated specially to ensure they are all in one section. // // Basic Block Sections // ==================== diff --git a/llvm/lib/CodeGen/BreakFalseDeps.cpp b/llvm/lib/CodeGen/BreakFalseDeps.cpp --- a/llvm/lib/CodeGen/BreakFalseDeps.cpp +++ b/llvm/lib/CodeGen/BreakFalseDeps.cpp @@ -136,7 +136,7 @@ const TargetRegisterClass *OpRC = TII->getRegClass(MI->getDesc(), OpIdx, TRI, *MF); - // If the instruction has a true dependency, we can hide the false depdency + // If the instruction has a true dependency, we can hide the false dependency // behind it. for (MachineOperand &CurrMO : MI->operands()) { if (!CurrMO.isReg() || CurrMO.isDef() || CurrMO.isUndef() || diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -7713,7 +7713,7 @@ // The register pressure on the IndirectBr edges is reduced because %GEPIOp is // no longer alive on them. // -// We try to unmerge GEPs here in CodGenPrepare, as opposed to limiting merging +// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as // not to disable further simplications and optimizations as a result of GEP // merging. diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -1652,7 +1652,7 @@ SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0); } - // Theres no unmerge type to target. Directly extract the bits from the + // There's no unmerge type to target. Directly extract the bits from the // source type unsigned DstSize = DstTy.getSizeInBits(); diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp --- a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp +++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp @@ -353,7 +353,7 @@ return; } - // At this point, we need to repair a defintion of a terminator. + // At this point, we need to repair a definition of a terminator. // Technically we need to fix the def of MI on all outgoing // edges of MI to keep the repairing local. In other words, we diff --git a/llvm/lib/CodeGen/LiveVariables.cpp b/llvm/lib/CodeGen/LiveVariables.cpp --- a/llvm/lib/CodeGen/LiveVariables.cpp +++ b/llvm/lib/CodeGen/LiveVariables.cpp @@ -826,7 +826,7 @@ return false; } -/// addNewBlock - Add a new basic block BB as an empty succcessor to DomBB. All +/// addNewBlock - Add a new basic block BB as an empty successor to DomBB. All /// variables that are live out of DomBB will be marked as passing live through /// BB. void LiveVariables::addNewBlock(MachineBasicBlock *BB, @@ -875,7 +875,7 @@ } } -/// addNewBlock - Add a new basic block BB as an empty succcessor to DomBB. All +/// addNewBlock - Add a new basic block BB as an empty successor to DomBB. 
All /// variables that are live out of DomBB will be marked as passing live through /// BB. LiveInSets[BB] is *not* updated (because it is not needed during /// PHIElimination). diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -2770,7 +2770,7 @@ // Check live-in list of each MBB. If a register is live into MBB, check // that the register is in regsLiveOut of each predecessor block. Since - // this must come from a definition in the predecesssor or its live-in + // this must come from a definition in the predecessor or its live-in // list, this will catch a live-through case where the predecessor does not // have the register in its live-in list. This currently only checks // registers that have no aliases, are not allocatable and are not diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -1027,7 +1027,7 @@ int DefStageNum = Schedule.getStage(Def); unsigned StageNum = CurStageNum; if (DefStageNum != -1 && (int)InstrStageNum > DefStageNum) { - // Compute the difference in stages between the defintion and the use. + // Compute the difference in stages between the definition and the use. unsigned StageDiff = (InstrStageNum - DefStageNum); // Make an adjustment to get the last definition. StageNum -= StageDiff; diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp --- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp +++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp @@ -1094,7 +1094,7 @@ int64_t OffsetBeforeAlignment = Offset; Offset = alignTo(Offset, StackAlign, Skew); - // If we have increased the offset to fulfill the alignment constrants, + // If we have increased the offset to fulfill the alignment constraints, // then the scavenging spill slots may become harder to reach from the // stack pointer, float them so they stay close. if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS && diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -113,7 +113,7 @@ static cl::opt LargeIntervalFreqThreshold( "large-interval-freq-threshold", cl::Hidden, - cl::desc("For a large interval, if it is coalesed with other live " + cl::desc("For a large interval, if it is coalesced with other live " "intervals many times more than the threshold, stop its " "coalescing to control the compile time. "), cl::init(100)); diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp --- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -920,7 +920,7 @@ !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad())) continue; - // Always add dependecy edge to BarrierChain if present. + // Always add dependency edge to BarrierChain if present. 
if (BarrierChain) BarrierChain->addPredBarrier(SU); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -11119,7 +11119,7 @@ SL->SwitchCases.push_back(CB); } -// Scale CaseProb after peeling a case with the probablity of PeeledCaseProb +// Scale CaseProb after peeling a case with the probability of PeeledCaseProb // from the swith statement. static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb) { @@ -11181,7 +11181,7 @@ Clusters.erase(PeeledCaseIt); for (CaseCluster &CC : Clusters) { LLVM_DEBUG( - dbgs() << "Scale the probablity for one cluster, before scaling: " + dbgs() << "Scale the probability for one cluster, before scaling: " << CC.Prob << "\n"); CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb); LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n"); @@ -11211,7 +11211,7 @@ // if there are many clusters. sortAndRangeify(Clusters); - // The branch probablity of the peeled case. + // The branch probability of the peeled case. BranchProbability PeeledCaseProb = BranchProbability::getZero(); MachineBasicBlock *PeeledSwitchMBB = peelDominantCaseCluster(SI, Clusters, PeeledCaseProb); diff --git a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp --- a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp +++ b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp @@ -46,7 +46,7 @@ /// This pass can be disabled via the -enable-patchpoint-liveness=false flag. /// The pass skips functions that don't have any patchpoint intrinsics. The /// information provided by this pass is optional and not required by the -/// aformentioned intrinsic to function. +/// aforementioned intrinsic to function. class StackMapLiveness : public MachineFunctionPass { const TargetRegisterInfo *TRI; LivePhysRegs LiveRegs; diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp --- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -2375,7 +2375,7 @@ // read-only section by the compiler. // For BSS kind, zero initialized data must be emitted to the .data section // because external linkage control sections that get mapped to the .bss - // section will be linked as tentative defintions, which is only appropriate + // section will be linked as tentative definitions, which is only appropriate // for SectionKind::Common. if (Kind.isData() || Kind.isReadOnlyWithRel() || Kind.isBSS()) { if (TM.getDataSections()) { diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp --- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -1057,7 +1057,7 @@ } } - // Check if the reschedule will not break depedencies. + // Check if the reschedule will not break dependencies. 
unsigned NumVisited = 0; for (MachineInstr &OtherMI : make_range(mi, MachineBasicBlock::iterator(KillMI))) { diff --git a/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp b/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp --- a/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp @@ -70,7 +70,7 @@ continue; } Optional ByteSize; - // If this abbrevation still has a fixed byte size, then update the + // If this abbreviation still has a fixed byte size, then update the // FixedAttributeSize as needed. switch (F) { case DW_FORM_addr: @@ -116,7 +116,7 @@ } else { // Attribute and form pairs must either both be non-zero, in which case // they are added to the abbreviation declaration, or both be zero to - // terminate the abbrevation declaration. In this case only one was + // terminate the abbreviation declaration. In this case only one was // zero which is an error. clear(); return false; diff --git a/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp b/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp --- a/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp +++ b/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp @@ -160,7 +160,7 @@ LR.FuncRange = {FuncAddr, FuncAddr + Data.getU32(&Offset)}; uint32_t NameOffset = Data.getU32(&Offset); // The "lookup" functions doesn't report errors as accurately as the "decode" - // function as it is meant to be fast. For more accurage errors we could call + // function as it is meant to be fast. For more accurate errors we could call // "decode". if (!Data.isValidOffset(Offset)) return createStringError(std::errc::io_error, diff --git a/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp b/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp --- a/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp +++ b/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp @@ -58,7 +58,7 @@ // FIXME: lli aims to provide both, RuntimeDyld and JITLink, as the dynamic // loaders for it's JIT implementations. And they both offer debugging via the // GDB JIT interface, which builds on the two well-known symbol names below. -// As these symbols must be unique accross the linked executable, we can only +// As these symbols must be unique across the linked executable, we can only // define them in one of the libraries and make the other depend on it. // OrcTargetProcess is a minimal stub for embedding a JIT client in remote // executors. For the moment it seems reasonable to have the definition there diff --git a/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp b/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp --- a/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// Generic COFF LinkGraph buliding code. +// Generic COFF LinkGraph building code. // //===----------------------------------------------------------------------===// #include "COFFLinkGraphBuilder.h" diff --git a/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp b/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp --- a/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// Generic ELF LinkGraph buliding code. +// Generic ELF LinkGraph building code. 
// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp --- a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp @@ -551,7 +551,7 @@ PassConfiguration Config; const Triple &TT = G->getTargetTriple(); if (Ctx->shouldAddDefaultTargetPasses(TT)) { - // Add eh-frame passses. + // Add eh-frame passes. Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame")); Config.PrePrunePasses.push_back(EHFrameEdgeFixer( ".eh_frame", 8, aarch64::Pointer32, aarch64::Pointer64, diff --git a/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp --- a/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// Generic MachO LinkGraph buliding code. +// Generic MachO LinkGraph building code. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp --- a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp @@ -560,7 +560,7 @@ Config.PrePrunePasses.push_back( CompactUnwindSplitter("__LD,__compact_unwind")); - // Add eh-frame passses. + // Add eh-frame passes. // FIXME: Prune eh-frames for which compact-unwind is available once // we support compact-unwind registration with libunwind. Config.PrePrunePasses.push_back( diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp --- a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp @@ -475,7 +475,7 @@ PassConfiguration Config; if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) { - // Add eh-frame passses. + // Add eh-frame passes. Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_x86_64()); Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_x86_64()); diff --git a/llvm/lib/ExecutionEngine/Orc/Core.cpp b/llvm/lib/ExecutionEngine/Orc/Core.cpp --- a/llvm/lib/ExecutionEngine/Orc/Core.cpp +++ b/llvm/lib/ExecutionEngine/Orc/Core.cpp @@ -346,7 +346,7 @@ } } - // The OnResolveInfo struct will hold the aliases and responsibilty for each + // The OnResolveInfo struct will hold the aliases and responsibility for each // query in the list. struct OnResolveInfo { OnResolveInfo(std::unique_ptr R, @@ -920,7 +920,7 @@ MI.UnemittedDependencies.erase(&OtherJITDylib); } - // If this symbol dependended on any symbols in the error state then move + // If this symbol depended on any symbols in the error state then move // this symbol to the error state too. 
if (DependsOnSymbolInErrorState) Symbols[Name].setFlags(Symbols[Name].getFlags() | diff --git a/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp b/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp --- a/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp +++ b/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp @@ -232,7 +232,7 @@ if (auto *COFFObj = dyn_cast(&Obj)) { auto &ES = getExecutionSession(); - // For all resolved symbols that are not already in the responsibilty set: + // For all resolved symbols that are not already in the responsibility set: // check whether the symbol is in a comdat section and if so mark it as // weak. for (auto &Sym : COFFObj->symbols()) { diff --git a/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp b/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp --- a/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp +++ b/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp @@ -249,7 +249,7 @@ auto I = PendingCallWrapperResults.find(0); assert(PendingCallWrapperResults.size() == 1 && I != PendingCallWrapperResults.end() && - "Setup message handler not connectly set up"); + "Setup message handler not correctly set up"); auto SetupMsgHandler = std::move(I->second); PendingCallWrapperResults.erase(I); diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp --- a/llvm/lib/IR/Attributes.cpp +++ b/llvm/lib/IR/Attributes.cpp @@ -1804,7 +1804,7 @@ } //===----------------------------------------------------------------------===// -// AttributeFuncs Function Defintions +// AttributeFuncs Function Definitions //===----------------------------------------------------------------------===// /// Which attributes cannot be applied to a type. diff --git a/llvm/lib/IR/BuiltinGCs.cpp b/llvm/lib/IR/BuiltinGCs.cpp --- a/llvm/lib/IR/BuiltinGCs.cpp +++ b/llvm/lib/IR/BuiltinGCs.cpp @@ -59,7 +59,7 @@ /// A GCStrategy which serves as an example for the usage of a statepoint based /// lowering strategy. This GCStrategy is intended to suitable as a default /// implementation usable with any collector which can consume the standard -/// stackmap format generated by statepoints, uses the default addrespace to +/// stackmap format generated by statepoints, uses the default addresspace to /// distinguish between gc managed and non-gc managed pointers, and has /// reasonable relocation semantics. class StatepointGC : public GCStrategy { diff --git a/llvm/lib/IR/PassManager.cpp b/llvm/lib/IR/PassManager.cpp --- a/llvm/lib/IR/PassManager.cpp +++ b/llvm/lib/IR/PassManager.cpp @@ -14,7 +14,7 @@ using namespace llvm; namespace llvm { -// Explicit template instantiations and specialization defininitions for core +// Explicit template instantiations and specialization definitions for core // template typedefs. template class AllAnalysesOn; template class AllAnalysesOn; diff --git a/llvm/lib/IR/ReplaceConstant.cpp b/llvm/lib/IR/ReplaceConstant.cpp --- a/llvm/lib/IR/ReplaceConstant.cpp +++ b/llvm/lib/IR/ReplaceConstant.cpp @@ -72,8 +72,8 @@ if (Insts) Insts->insert(NI); } else { - // We had already encountered CE, the correponding instruction already - // exist, use it to replace CE. + // We had already encountered CE, the corresponding instruction + // already exist, use it to replace CE. 
NI = Visited[CE]; } diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp --- a/llvm/lib/MC/ELFObjectWriter.cpp +++ b/llvm/lib/MC/ELFObjectWriter.cpp @@ -594,7 +594,7 @@ return false; if (Symbol.isVariable() && Symbol.isUndefined()) { - // FIXME: this is here just to diagnose the case of a var = commmon_sym. + // FIXME: this is here just to diagnose the case of a var = common_sym. Layout.getBaseSymbol(Symbol); return false; } diff --git a/llvm/lib/MC/MCParser/AsmLexer.cpp b/llvm/lib/MC/MCParser/AsmLexer.cpp --- a/llvm/lib/MC/MCParser/AsmLexer.cpp +++ b/llvm/lib/MC/MCParser/AsmLexer.cpp @@ -716,7 +716,7 @@ if (CommentString.size() == 1) return CommentString[0] == Ptr[0]; - // Allow # preprocessor commments also be counted as comments for "##" cases + // Allow # preprocessor comments also be counted as comments for "##" cases if (CommentString[1] == '#') return CommentString[0] == Ptr[0]; diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -3131,7 +3131,7 @@ if (checkForValidSection()) return true; // Only support spaces as separators for .ascii directive for now. See the - // discusssion at https://reviews.llvm.org/D91460 for more details. + // discussion at https://reviews.llvm.org/D91460 for more details. do { if (parseEscapedString(Data)) return true; diff --git a/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/llvm/lib/MC/MCParser/ELFAsmParser.cpp --- a/llvm/lib/MC/MCParser/ELFAsmParser.cpp +++ b/llvm/lib/MC/MCParser/ELFAsmParser.cpp @@ -489,7 +489,7 @@ if (UniqueStr != "unique") return TokError("expected 'unique'"); if (L.isNot(AsmToken::Comma)) - return TokError("expected commma"); + return TokError("expected comma"); Lex(); if (getParser().parseAbsoluteExpression(UniqueID)) return true; diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp --- a/llvm/lib/MC/WasmObjectWriter.cpp +++ b/llvm/lib/MC/WasmObjectWriter.cpp @@ -1548,7 +1548,7 @@ } // Custom sections can also belong to COMDAT groups. In this case the - // decriptor's "index" field is the section index (in the final object + // descriptor's "index" field is the section index (in the final object // file), but that is not known until after layout, so it must be fixed up // later if (const MCSymbolWasm *C = Section.getGroup()) { diff --git a/llvm/lib/Option/OptTable.cpp b/llvm/lib/Option/OptTable.cpp --- a/llvm/lib/Option/OptTable.cpp +++ b/llvm/lib/Option/OptTable.cpp @@ -511,7 +511,7 @@ function_ref ErrorFn) const { SmallVector NewArgv; // The environment variable specifies initial options which can be overridden - // by commnad line options. + // by command line options. cl::expandResponseFiles(Argc, Argv, EnvVar, Saver, NewArgv); unsigned MAI, MAC; diff --git a/llvm/lib/ProfileData/MemProf.cpp b/llvm/lib/ProfileData/MemProf.cpp --- a/llvm/lib/ProfileData/MemProf.cpp +++ b/llvm/lib/ProfileData/MemProf.cpp @@ -101,7 +101,7 @@ } Result.push_back(static_cast(Tag)); } - // Advace the buffer to one past the schema if we succeeded. + // Advance the buffer to one past the schema if we succeeded. Buffer = Ptr; return Result; } diff --git a/llvm/lib/Support/FileUtilities.cpp b/llvm/lib/Support/FileUtilities.cpp --- a/llvm/lib/Support/FileUtilities.cpp +++ b/llvm/lib/Support/FileUtilities.cpp @@ -168,7 +168,7 @@ /// DiffFilesWithTolerance - Compare the two files specified, returning 0 if the /// files match, 1 if they are different, and 2 if there is a file error. 
This -/// function differs from DiffFiles in that you can specify an absolete and +/// function differs from DiffFiles in that you can specify an absolute and /// relative FP error that is allowed to exist. If you specify a string to fill /// in for the error option, it will set the string to an error message if an /// error occurs, allowing the caller to distinguish between a failed diff and a diff --git a/llvm/lib/Support/Triple.cpp b/llvm/lib/Support/Triple.cpp --- a/llvm/lib/Support/Triple.cpp +++ b/llvm/lib/Support/Triple.cpp @@ -633,7 +633,7 @@ static Triple::ObjectFormatType parseFormat(StringRef EnvironmentName) { return StringSwitch(EnvironmentName) - // "xcoff" must come before "coff" because of the order-dependendent + // "xcoff" must come before "coff" because of the order-dependent // pattern matching. .EndsWith("xcoff", Triple::XCOFF) .EndsWith("coff", Triple::COFF) diff --git a/llvm/lib/Support/Unix/Path.inc b/llvm/lib/Support/Unix/Path.inc --- a/llvm/lib/Support/Unix/Path.inc +++ b/llvm/lib/Support/Unix/Path.inc @@ -1138,7 +1138,7 @@ return std::error_code(); RealPath->clear(); #if defined(F_GETPATH) - // When F_GETPATH is availble, it is the quickest way to get + // When F_GETPATH is available, it is the quickest way to get // the real path name. char Buffer[PATH_MAX]; if (::fcntl(ResultFD, F_GETPATH, Buffer) != -1) diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp --- a/llvm/lib/TableGen/Record.cpp +++ b/llvm/lib/TableGen/Record.cpp @@ -1699,7 +1699,7 @@ return const_cast(this); // No doubt that there exists a record, so we should check if types are - // compatiable. + // compatible. return IntInit::get(getRecordKeeper(), CurRec->getType()->typeIsA(CheckType)); } @@ -1712,7 +1712,7 @@ return const_cast(this); } - // Check if types are compatiable. + // Check if types are compatible. return IntInit::get(getRecordKeeper(), DefInit::get(D)->getType()->typeIsA(CheckType)); } diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp --- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -575,7 +575,7 @@ // Update the CFG first. updateTailPHIs(); - // Save successor probabilties before removing CmpBB and Tail from their + // Save successor probabilities before removing CmpBB and Tail from their // parents. BranchProbability Head2CmpBB = MBPI->getEdgeProbability(Head, CmpBB); BranchProbability CmpBB2Tail = MBPI->getEdgeProbability(CmpBB, Tail); @@ -583,7 +583,7 @@ Head->removeSuccessor(CmpBB); CmpBB->removeSuccessor(Tail); - // If Head and CmpBB had successor probabilties, udpate the probabilities to + // If Head and CmpBB had successor probabilities, update the probabilities to // reflect the ccmp-conversion. if (Head->hasSuccessorProbabilities() && CmpBB->hasSuccessorProbabilities()) { diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10707,7 +10707,7 @@ if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock) return SDValue(); - // The DUPQ operation is indepedent of element type so normalise to i64s. + // The DUPQ operation is independent of element type so normalise to i64s. SDValue Idx128 = Op.getOperand(2); // DUPQ can be used when idx is in range.
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h @@ -311,7 +311,7 @@ /// Returns true if the instruction has a shift by immediate that can be /// executed in one cycle less. static bool isFalkorShiftExtFast(const MachineInstr &MI); - /// Return true if the instructions is a SEH instruciton used for unwinding + /// Return true if the instruction is a SEH instruction used for unwinding /// on Windows. static bool isSEHInstruction(const MachineInstr &MI); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -6697,7 +6697,7 @@ return false; // Find Definition. - assert(MI.getParent() && "Incomplete machine instruciton\n"); + assert(MI.getParent() && "Incomplete machine instruction\n"); MachineBasicBlock *MBB = MI.getParent(); MachineFunction *MF = MBB->getParent(); MachineRegisterInfo *MRI = &MF->getRegInfo(); diff --git a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp --- a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp +++ b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp @@ -372,7 +372,7 @@ InstCount--; break; case FrameHelperType::PrologFrame: { - // Effecitvely no change in InstCount since FpAdjusment is included. + // Effectively no change in InstCount since FpAdjustment is included. break; } case FrameHelperType::Epilog: diff --git a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp --- a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp +++ b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp @@ -1,4 +1,4 @@ -//===- AArch64SLSHardening.cpp - Harden Straight Line Missspeculation -----===// +//===- AArch64SLSHardening.cpp - Harden Straight Line Misspeculation -----===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp --- a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp +++ b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp @@ -1,4 +1,4 @@ -//===- AArch64SpeculationHardening.cpp - Harden Against Missspeculation --===// +//===- AArch64SpeculationHardening.cpp - Harden Against Misspeculation --===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp @@ -196,7 +196,7 @@ void addSchedBarrierEdges(SUnit &SU); // Use a SCHED_BARRIER's mask to identify instruction SchedGroups that should - // not be reordered accross the SCHED_BARRIER. This is used for the base + // not be reordered across the SCHED_BARRIER. This is used for the base // SCHED_BARRIER, and not SCHED_GROUP_BARRIER.
The difference is that // SCHED_BARRIER will always block all instructions that can be classified // into a particular SchedClass, whereas SCHED_GROUP_BARRIER has a fixed size diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp --- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp +++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp @@ -2773,7 +2773,7 @@ if (AFI.shouldSignReturnAddress()) { // The order of register must match the order we push them, because the // PEI assigns frame indices in that order. When compiling for return - // address sign and authenication, we use split push, therefore the orders + // address sign and authentication, we use split push, therefore the orders // we want are: // LR, R7, R6, R5, R4, , R11, R10, R9, R8, D15-D8 CSI.insert(find_if(CSI, diff --git a/llvm/lib/Target/ARM/ARMSLSHardening.cpp b/llvm/lib/Target/ARM/ARMSLSHardening.cpp --- a/llvm/lib/Target/ARM/ARMSLSHardening.cpp +++ b/llvm/lib/Target/ARM/ARMSLSHardening.cpp @@ -1,4 +1,4 @@ -//===- ARMSLSHardening.cpp - Harden Straight Line Missspeculation ---------===// +//===- ARMSLSHardening.cpp - Harden Straight Line Misspeculation ---------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -1774,7 +1774,7 @@ bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat; auto LT = TLI->getTypeLegalizationCost(DL, ICA.getArgTypes()[0]); EVT MTy = TLI->getValueType(DL, ICA.getReturnType()); - // Check for the legal types, with the corect subtarget features. + // Check for the legal types, with the correct subtarget features. if ((ST->hasVFP2Base() && LT.second == MVT::f32 && MTy == MVT::i32) || (ST->hasFP64() && LT.second == MVT::f64 && MTy == MVT::i32) || (ST->hasFullFP16() && LT.second == MVT::f16 && MTy == MVT::i32)) diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -1157,9 +1157,9 @@ // Directive not convertable to compact unwind, bail out. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() - << "CFI directive not compatiable with comact " - "unwind encoding, opcode=" << Inst.getOperation() - << "\n"); + << "CFI directive not compatible with compact " + "unwind encoding, opcode=" + << Inst.getOperation() << "\n"); return CU::UNWIND_ARM_MODE_DWARF; break; } diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp @@ -481,7 +481,7 @@ unsigned EncodedValue, const MCSubtargetInfo &STI) const { if (isThumb2(STI)) { - // NEON Thumb2 data-processsing encodings are very simple: bit 24 is moved + // NEON Thumb2 data-processing encodings are very simple: bit 24 is moved // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are // set to 1111. 
unsigned Bit24 = EncodedValue & 0x01000000; diff --git a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp --- a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp +++ b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp @@ -857,7 +857,7 @@ if (PrevVCMP) { if (MachineOperand *MO = Instr.findRegisterUseOperand( PrevVCMP->getOperand(0).getReg(), /*isKill*/ true)) { - // If we come accross the instr that kills PrevVCMP's result, record it + // If we come across the instr that kills PrevVCMP's result, record it // so we can remove the kill flag later if we need to. PrevVCMPResultKiller = MO; } diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp --- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp +++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp @@ -320,7 +320,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. TBB = I->getOperand(0).getMBB(); continue; } diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp --- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp +++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp @@ -51,7 +51,7 @@ // !llvm.preserve.access.index // // Bitfield member access needs special attention. User cannot take the -// address of a bitfield acceess. To facilitate kernel verifier +// address of a bitfield access. To facilitate kernel verifier // for easy bitfield code optimization, a new clang intrinsic is introduced: // uint32_t __builtin_preserve_field_info(member_access, info_kind) // In IR, a chain with two (or more) intrinsic calls will be generated: diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.cpp b/llvm/lib/Target/BPF/BPFInstrInfo.cpp --- a/llvm/lib/Target/BPF/BPFInstrInfo.cpp +++ b/llvm/lib/Target/BPF/BPFInstrInfo.cpp @@ -204,7 +204,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. TBB = I->getOperand(0).getMBB(); continue; } diff --git a/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp b/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp --- a/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp +++ b/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp @@ -11,7 +11,7 @@ // ldd r2, r1, 0 // add r3, struct_base_reg, r2 // -// Here @global should represent an AMA (abstruct member access). +// Here @global should represent an AMA (abstract member access). // Such an access is subject to bpf load time patching. After this pass, the // code becomes // ld_imm64 r1, @global diff --git a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp --- a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp +++ b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp @@ -330,7 +330,7 @@ MachineFunction &MF = DAG.getMachineFunction(); - // Used with vargs to acumulate store chains. + // Used with vargs to accumulate store chains. std::vector OutChains; // Assign locations to all of the incoming arguments. diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp --- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -13,7 +13,7 @@ // Having said that, we should re-attempt to pull this earlier at some point // in future. 
-// The basic approach looks for sequence of predicated jump, compare instruciton +// The basic approach looks for sequence of predicated jump, compare instruction // that genereates the predicate and, the feeder to the predicate. Once it finds // all, it collapses compare and jump instruction into a new value jump // intstructions. diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h --- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h +++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h @@ -97,7 +97,7 @@ // together. bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override; - // isLegalToPruneDependencies - Is it legal to prune dependece between SUI + // isLegalToPruneDependencies - Is it legal to prune dependence between SUI // and SUJ. bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) override; diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp --- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -988,7 +988,7 @@ // We attempt to detect it by analyzing existing dependencies in the packet. // Analyze relationships between all existing members of the packet. - // Look for Anti dependecy on the same predicate reg as used in the + // Look for Anti dependency on the same predicate reg as used in the // candidate. for (auto I : CurrentPacketMIs) { // Scheduling Unit for current insn in the packet. diff --git a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp --- a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp @@ -326,7 +326,7 @@ return false; // This check is in place specifically for intrinsics. isSameOperationAs will // return two for any two hexagon intrinsics because they are essentially the - // same instruciton (CallInst). We need to scratch the surface to see if they + // same instruction (CallInst). We need to scratch the surface to see if they // are calls to the same function. if (CallInst *C1 = dyn_cast(I1)) { if (CallInst *C2 = dyn_cast(I2)) { diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.td b/llvm/lib/Target/M68k/M68kInstrInfo.td --- a/llvm/lib/Target/M68k/M68kInstrInfo.td +++ b/llvm/lib/Target/M68k/M68kInstrInfo.td @@ -464,7 +464,7 @@ // Complex Patterns //===----------------------------------------------------------------------===// -// NOTE Though this CP is not strictly necessarily it will simplify instruciton +// NOTE Though this CP is not strictly necessary, it will simplify instruction // definitions def MxCP_ARI : ComplexPattern; diff --git a/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp b/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp --- a/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp +++ b/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// This file contains defintions for M68k code emitter. +/// This file contains definitions for M68k code emitter.
/// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp --- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp +++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp @@ -209,7 +209,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. TBB = I->getOperand(0).getMBB(); continue; } diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -6476,7 +6476,7 @@ } MCBinaryExpr::Opcode Opcode; // GAS and LLVM treat comparison operators different. GAS will generate -1 - // or 0, while LLVM will generate 0 or 1. Since a comparsion operator is + // or 0, while LLVM will generate 0 or 1. Since a comparison operator is // highly unlikely to be found in a memory offset expression, we don't // handle them. switch (Tok.getKind()) { diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -3631,7 +3631,7 @@ MipsFI->setVarArgsFrameIndex(0); - // Used with vargs to acumulate store chains. + // Used with vargs to accumulate store chains. std::vector OutChains; // Assign locations to all of the incoming arguments. diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -1008,7 +1008,7 @@ CurDAG->getDataLayout().getPointerSizeInBits(MemSD->getAddressSpace()); // Volatile Setting - // - .volatile is only availalble for .global and .shared + // - .volatile is only available for .global and .shared bool IsVolatile = MemSD->isVolatile(); if (CodeAddrSpace != NVPTX::PTXLdStInstCode::GLOBAL && CodeAddrSpace != NVPTX::PTXLdStInstCode::SHARED && diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp @@ -183,7 +183,7 @@ emitLabel(LabelSym, Inst.getLoc()); } -// This funciton checks if the parameter Inst is part of the setup for a link +// This function checks if the parameter Inst is part of the setup for a link // time GOT PC Relative optimization. For example in this situation: // // >)>> @@ -195,7 +195,7 @@ // and has the flag MCSymbolRefExpr::VK_PPC_PCREL_OPT. After that we just look // at the opcode and in the case of PLDpc we will return true. For the load // (or store) this function will return false indicating it has found the second -// instruciton in the pair. +// instruction in the pair. Optional llvm::isPartOfGOTToPCRelPair(const MCInst &Inst, const MCSubtargetInfo &STI) { // Need at least two operands. diff --git a/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp b/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp --- a/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp +++ b/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp @@ -76,7 +76,7 @@ if (!isa(CI)) return false; - // FIXME: no-errno and trapping-math need to be set for MASS converstion + // FIXME: no-errno and trapping-math need to be set for MASS conversion // but they don't have IR representation. 
return CI.hasNoNaNs() && CI.hasNoInfs() && CI.hasNoSignedZeros(); } diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp --- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -296,8 +296,8 @@ /// SelectAddrIdx - Given the specified address, check to see if it can be /// represented as an indexed [r+r] operation. /// This is for xform instructions whose associated displacement form is D. - /// The last parameter \p 0 means associated D form has no requirment for 16 - /// bit signed displacement. + /// The last parameter \p 0 means associated D form has no requirement for + /// 16 bit signed displacement. /// Returns false if it can be represented by [r+imm], which are preferred. bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) { return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, None); @@ -333,8 +333,8 @@ /// SelectAddrImm - Returns true if the address N can be represented by /// a base register plus a signed 16-bit displacement [r+imm]. - /// The last parameter \p 0 means D form has no requirment for 16 bit signed - /// displacement. + /// The last parameter \p 0 means D form has no requirement for 16 bit + /// signed displacement. bool SelectAddrImm(SDValue N, SDValue &Disp, SDValue &Base) { return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, None); diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -320,7 +320,7 @@ PPC32_GOT, /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and - /// local dynamic TLS and position indendepent code on PPC32. + /// local dynamic TLS and position independent code on PPC32. PPC32_PICGOT, /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -5230,7 +5230,7 @@ unsigned RetOpc = 0; // This is a call through a function pointer. if (CFlags.IsIndirect) { - // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross + // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across // indirect calls. The save of the caller's TOC pointer to the stack will be // inserted into the DAG as part of call lowering. The restore of the TOC // pointer is modeled by using a pseudo instruction for the call opcode that @@ -5244,7 +5244,7 @@ assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI."); RetOpc = PPCISD::CALL_NOTOC; } else if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI()) - // The ABIs that maintain a TOC pointer accross calls need to have a nop + // The ABIs that maintain a TOC pointer across calls need to have a nop // immediately following the call instruction if the caller and callee may // have different TOC bases. 
At link time if the linker determines the calls // may not share a TOC base, the call is redirected to a trampoline inserted @@ -9256,7 +9256,7 @@ // Exclude somes case where LD_SPLAT is worse than scalar_to_vector: // Below cases should also happen for "lfiwzx/lfiwax + LE target + index // 1" and "lxvrhx + BE target + index 7" and "lxvrbx + BE target + index - // 15", but funciton IsValidSplatLoad() now will only return true when + // 15", but function IsValidSplatLoad() now will only return true when // the data at index 0 is not nullptr. So we will not get into trouble for // these cases. // diff --git a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp --- a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp +++ b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp @@ -640,7 +640,7 @@ // ... // %add = getelementptr %phinode, %inc // -// First returned instruciton is %phinode (or a type cast to %phinode), caller +// First returned instruction is %phinode (or a type cast to %phinode), caller // needs this value to rewrite other load/stores in the same chain. // Second returned instruction is %add, caller needs this value to rewrite other // load/stores in the same chain. diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp --- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp +++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -1379,7 +1379,7 @@ bool IsPartiallyRedundant = (MBBtoMoveCmp != nullptr); // We cannot optimize an unsupported compare opcode or - // a mix of 32-bit and 64-bit comaprisons + // a mix of 32-bit and 64-bit comparisons if (!isSupportedCmpOp(CMPI1->getOpcode()) || !isSupportedCmpOp(CMPI2->getOpcode()) || is64bitCmpOp(CMPI1->getOpcode()) != is64bitCmpOp(CMPI2->getOpcode())) diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td b/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td --- a/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td +++ b/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td @@ -50,7 +50,7 @@ // The AllocationPriority is in the range [0, 63]. Assigned the ACC registers // the highest possible priority in this range to force the register allocator // to assign these registers first. This is done because the ACC registers - // must represent 4 advacent vector registers. For example ACC1 must be + // must represent 4 adjacent vector registers. For example ACC1 must be // VS4 - VS7. The value here must be at least 32 as we want to allocate // these registers even before we allocate global ranges. let AllocationPriority = 63; diff --git a/llvm/lib/Target/PowerPC/README_ALTIVEC.txt b/llvm/lib/Target/PowerPC/README_ALTIVEC.txt --- a/llvm/lib/Target/PowerPC/README_ALTIVEC.txt +++ b/llvm/lib/Target/PowerPC/README_ALTIVEC.txt @@ -103,7 +103,7 @@ //===----------------------------------------------------------------------===// -The code generated for this is truly aweful: +The code generated for this is truly awful: vector float test(float a, float b) { return (vector float){ 0.0, a, 0.0, 0.0}; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -10973,7 +10973,7 @@ EVT PtrVT = getPointerTy(DAG.getDataLayout()); MVT XLenVT = Subtarget.getXLenVT(); unsigned XLenInBytes = Subtarget.getXLen() / 8; - // Used with vargs to acumulate store chains. + // Used with vargs to accumulate store chains. 
std::vector OutChains; // Assign locations to all of the incoming arguments. diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td --- a/llvm/lib/Target/Sparc/SparcInstrInfo.td +++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td @@ -1587,7 +1587,7 @@ Requires<[HasHardQuad]>; } -// Floating point conditional move instrucitons with %fcc0-%fcc3. +// Floating point conditional move instructions with %fcc0-%fcc3. let Predicates = [HasV9] in { let Constraints = "$f = $rd", intcc = 0 in { def V9MOVFCCrr diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp --- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -410,7 +410,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. TBB = Branch.getMBBTarget(); continue; } diff --git a/llvm/lib/Target/WebAssembly/README.txt b/llvm/lib/Target/WebAssembly/README.txt --- a/llvm/lib/Target/WebAssembly/README.txt +++ b/llvm/lib/Target/WebAssembly/README.txt @@ -17,7 +17,7 @@ applications that can run in browsers and other environments. wasi-sdk provides a more minimal C/C++ SDK based on clang, llvm and a libc based -on musl, for producing WebAssemmbly applictions that use the WASI ABI. +on musl, for producing WebAssembly applications that use the WASI ABI. Rust provides WebAssembly support integrated into Cargo. There are two main options: diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp @@ -190,8 +190,8 @@ } // Add BBs to exceptions' block set. This is a preparation to take out - // remaining incorect BBs from exceptions, because we need to iterate over BBs - // for each exception. + // remaining incorrect BBs from exceptions, because we need to iterate over + // BBs for each exception. for (auto *DomNode : post_order(&MDT)) { MachineBasicBlock *MBB = DomNode->getBlock(); WebAssemblyException *WE = getExceptionFor(MBB); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp @@ -41,7 +41,7 @@ char WebAssemblyFixBrTableDefaults::ID = 0; -// Target indepedent selection dag assumes that it is ok to use PointerTy +// Target independent selection dag assumes that it is ok to use PointerTy // as the index for a "switch", whereas Wasm so far only has a 32-bit br_table. // See e.g.
SelectionDAGBuilder::visitJumpTableHeader // We have a 64-bit br_table in the tablegen defs as a result, which does get diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -250,7 +250,7 @@ for (auto T : {MVT::v2i64, MVT::v2f64}) setOperationAction(Op, T, Expand); - // But saturating fp_to_int converstions are + // But saturating fp_to_int conversions are for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}) setOperationAction(Op, MVT::v4i32, Custom); } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -47,7 +47,7 @@ case WebAssembly::CONST_F32: case WebAssembly::CONST_F64: // isReallyTriviallyReMaterializableGeneric misses these because of the - // ARGUMENTS implicit def, so we manualy override it here. + // ARGUMENTS implicit def, so we manually override it here. return true; default: return false; diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp --- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -531,7 +531,7 @@ // Macro fusion actually happens and there is no other fragment inserted // after the previous instruction. // - // Do nothing here since we already inserted a BoudaryAlign fragment when + // Do nothing here since we already inserted a BoundaryAlign fragment when // we met the first instruction in the fused pair and we'll tie them // together in emitInstructionEnd. // diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -5238,7 +5238,7 @@ SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); - // Multiply is commmutative. + // Multiply is commutative. if (!foldedLoad) { foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); if (foldedLoad) diff --git a/llvm/lib/Target/X86/X86InstrFMA3Info.h b/llvm/lib/Target/X86/X86InstrFMA3Info.h --- a/llvm/lib/Target/X86/X86InstrFMA3Info.h +++ b/llvm/lib/Target/X86/X86InstrFMA3Info.h @@ -43,7 +43,7 @@ /// This bit must be set in the 'Attributes' field of FMA group if such /// group of FMA opcodes consists of AVX512 opcodes accepting a k-mask and /// passing the elements from the 1st operand to the result of the operation - /// when the correpondings bits in the k-mask are unset. + /// when the corresponding bits in the k-mask are unset. KMergeMasked = 0x2, /// This bit must be set in the 'Attributes' field of FMA group if such diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -1010,7 +1010,7 @@ // Scalar SSE intrinsic fragments to match several different types of loads. // Used by scalar SSE intrinsic instructions which have 128 bit types, but // only load a single element. -// FIXME: We should add more canolicalizing in DAGCombine. Particulary removing +// FIXME: We should add more canonicalizing in DAGCombine. Particularly removing // the simple_load case.
def sse_load_f16 : PatFrags<(ops node:$ptr), [(v8f16 (simple_load node:$ptr)), diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -1393,7 +1393,7 @@ if (MinSize == 2 && Subtarget->is32Bit() && Subtarget->isTargetWindowsMSVC() && (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) { - // For compatibilty reasons, when targetting MSVC, is is important to + // For compatibility reasons, when targeting MSVC, it is important to // generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools // rely specifically on this pattern to be able to patch a function. // This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE. diff --git a/llvm/lib/Target/X86/X86PreAMXConfig.cpp b/llvm/lib/Target/X86/X86PreAMXConfig.cpp --- a/llvm/lib/Target/X86/X86PreAMXConfig.cpp +++ b/llvm/lib/Target/X86/X86PreAMXConfig.cpp @@ -233,7 +233,7 @@ continue; IntrinsicInst *TileDef = dyn_cast(Op); assert((TileDef && isTileLoad(TileDef)) && - "All KeyAMX's tile definiation should comes from TileLoad!"); + "All KeyAMX's tile definition should come from TileLoad!"); Shapes.push_back(TileDef->getOperand(0)); Shapes.push_back(TileDef->getOperand(1)); } diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp --- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp +++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp @@ -580,7 +580,7 @@ if (TermIt == MBB.end() || !TermIt->isBranch()) continue; - // Add all the non-EH-pad succossors to the blocks we want to harden. We + // Add all the non-EH-pad successors to the blocks we want to harden. We // skip EH pads because there isn't really a condition of interest on // entering. for (MachineBasicBlock *SuccMBB : MBB.successors()) diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -4133,7 +4133,7 @@ CurrVecTy->getNumElements() / CurrNumEltPerOp); assert(DL.getTypeSizeInBits(CoalescedVecTy) == DL.getTypeSizeInBits(CurrVecTy) && - "coalesciing elements doesn't change vector width."); + "coalescing elements doesn't change vector width."); while (NumEltRemaining > 0) { assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumtion?"); diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp --- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp @@ -1230,7 +1230,7 @@ Shape.AsyncLowering.getContextAlignment()); if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) { report_fatal_error( - "The alignment requirment of frame variables cannot be higher than " + "The alignment requirement of frame variables cannot be higher than " "the alignment of the async function context"); } break; @@ -2687,7 +2687,7 @@ } // Later code makes structural assumptions about single predecessors phis e.g - // that they are not live accross a suspend point. + // that they are not live across a suspend point.
cleanupSinglePredPHIs(F); // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp --- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp @@ -1288,7 +1288,7 @@ if (!BR || !BR->isConditional() || CondCmp != BR->getCondition()) return false; - // And the comparsion looks like : %cond = icmp eq i8 %V, constant. + // And the comparison looks like : %cond = icmp eq i8 %V, constant. // So we try to resolve constant for the first operand only since the // second operand should be literal constant by design. ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0)); diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp --- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp +++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp @@ -707,7 +707,7 @@ } // namespace PointerInfo } // namespace AA -/// Helper for AA::PointerInfo::Acccess DenseMap/Set usage. +/// Helper for AA::PointerInfo::Access DenseMap/Set usage. template <> struct DenseMapInfo : DenseMapInfo { using Access = AAPointerInfo::Access; @@ -722,7 +722,7 @@ struct DenseMapInfo : DenseMapInfo> {}; -/// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign +/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything /// but the instruction struct AccessAsInstructionInfo : DenseMapInfo { using Base = DenseMapInfo; diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -119,7 +119,7 @@ static cl::opt AlwaysInlineDeviceFunctions( "openmp-opt-inline-device", - cl::desc("Inline all applicible functions on the device."), cl::Hidden, + cl::desc("Inline all applicable functions on the device."), cl::Hidden, cl::init(false)); static cl::opt diff --git a/llvm/lib/Transforms/IPO/PartialInlining.cpp b/llvm/lib/Transforms/IPO/PartialInlining.cpp --- a/llvm/lib/Transforms/IPO/PartialInlining.cpp +++ b/llvm/lib/Transforms/IPO/PartialInlining.cpp @@ -749,9 +749,9 @@ // outlined region is predicted to be likely, its probability needs // to be made higher (more biased) to not under-estimate the cost of // function outlining. On the other hand, if the outlined region - // is predicted to be less likely, the predicted probablity is usually + // is predicted to be less likely, the predicted probability is usually // higher than the actual. For instance, the actual probability of the - // less likely target is only 5%, but the guessed probablity can be + // less likely target is only 5%, but the guessed probability can be // 40%. In the latter case, there is no need for further adjustement. // FIXME: add an option for this. if (OutlineRegionRelFreq < BranchProbability(45, 100)) diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -1333,7 +1333,7 @@ // this callsite that makes this inlining potentially illegal. Need to // set ComputeFullInlineCost, otherwise getInlineCost may return early // when cost exceeds threshold without checking all IRs in the callee.
- // The acutal cost does not matter because we only checks isNever() to + // The actual cost does not matter because we only check isNever() to // see if it is legal to inline the callsite. InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params, GetTTI(*Callee), GetAC, GetTLI); diff --git a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp --- a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp @@ -75,7 +75,7 @@ if (F->isDeclaration()) return false; // Skip function that will not be emitted into object file. The prevailing - // defintion will be verified instead. + // definition will be verified instead. if (F->hasAvailableExternallyLinkage()) return false; // Do a name matching. diff --git a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp --- a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp +++ b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp @@ -187,7 +187,7 @@ Function::Create(EmptyFT, GlobalValue::ExternalLinkage, F.getAddressSpace(), "", &M); NewF->copyAttributesFrom(&F); - // Only copy function attribtues. + // Only copy function attributes. NewF->setAttributes(AttributeList::get(M.getContext(), AttributeList::FunctionIndex, F.getAttributes().getFnAttrs())); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -89,12 +89,12 @@ } const APFloat &getFpVal() const { - assert(IsFp && BufHasFpVal && "Incorret state"); + assert(IsFp && BufHasFpVal && "Incorrect state"); return *getFpValPtr(); } APFloat &getFpVal() { - assert(IsFp && BufHasFpVal && "Incorret state"); + assert(IsFp && BufHasFpVal && "Incorrect state"); return *getFpValPtr(); } diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -1549,7 +1549,7 @@ // Follow host instrumentation for global and constant addresses. if (PtrTy->getPointerAddressSpace() != 0) return InsertBefore; - // Instrument generic addresses in supported addressspaces. + // Instrument generic addresses in supported address spaces. IRBuilder<> IRB(InsertBefore); Value *AddrLong = IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()); Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {AddrLong}); diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp --- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -1729,7 +1729,7 @@ // doesn't either a) tell us the loop exits on the first iteration (unless // *all* exits are predicateable) or b) tell us *which* exit might be taken. // This transformation looks a lot like a restricted form of dead loop - // elimination, but restricted to read-only loops and without neccesssarily + // elimination, but restricted to read-only loops and without necessarily // needing to kill the loop entirely. if (!LoopPredication) return false; diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp --- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp +++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp @@ -428,7 +428,7 @@ // order.
Thus, if FC0 comes *before* FC1 in a FusionCandidateSet, then FC0 // dominates FC1 and FC1 post-dominates FC0. // std::set was chosen because we want a sorted data structure with stable -// iterators. A subsequent patch to loop fusion will enable fusing non-ajdacent +// iterators. A subsequent patch to loop fusion will enable fusing non-adjacent // loops by moving intervening code around. When this intervening code contains // loops, those loops will be moved also. The corresponding FusionCandidates // will also need to be moved accordingly. As this is done, having stable @@ -739,7 +739,7 @@ if (TC0 == 0 || TC1 == 0) { LLVM_DEBUG(dbgs() << "Loop(s) do not have a single exit point or do not " "have a constant number of iterations. Peeling " - "is not benefical\n"); + "is not beneficial\n"); return {false, None}; } diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp --- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp +++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp @@ -182,7 +182,7 @@ // forward and backward dependences qualify. Disqualify loads that have // other unknown dependences. - SmallPtrSet LoadsWithUnknownDepedence; + SmallPtrSet LoadsWithUnknownDependence; for (const auto &Dep : *Deps) { Instruction *Source = Dep.getSource(LAI); @@ -190,9 +190,9 @@ if (Dep.Type == MemoryDepChecker::Dependence::Unknown) { if (isa(Source)) - LoadsWithUnknownDepedence.insert(Source); + LoadsWithUnknownDependence.insert(Source); if (isa(Destination)) - LoadsWithUnknownDepedence.insert(Destination); + LoadsWithUnknownDependence.insert(Destination); continue; } @@ -219,9 +219,9 @@ Candidates.emplace_front(Load, Store); } - if (!LoadsWithUnknownDepedence.empty()) + if (!LoadsWithUnknownDependence.empty()) Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) { - return LoadsWithUnknownDepedence.count(C.Load); + return LoadsWithUnknownDependence.count(C.Load); }); return Candidates; @@ -245,7 +245,7 @@ /// However, we know that this is not the case here, i.e. we can rely on LAA /// to provide us with loop-independent dependences for the cases we're /// interested. Consider the case for example where a loop-independent - /// dependece S1->S2 invalidates the forwarding S3->S2. + /// dependence S1->S2 invalidates the forwarding S3->S2. /// /// A[i] = ... (S1) /// ... = A[i] (S2) diff --git a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp --- a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp +++ b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp @@ -474,7 +474,7 @@ NumLoopBlocksDeleted += DeadLoopBlocks.size(); } - /// Constant-fold terminators of blocks acculumated in FoldCandidates into the + /// Constant-fold terminators of blocks accumulated in FoldCandidates into the /// unconditional branches. void foldTerminators() { for (BasicBlock *BB : FoldCandidates) { diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -4856,7 +4856,7 @@ /// Now count registers number mathematical expectation for each formula: /// Note that for each use we exclude probability if not selecting for the use. /// For example for Use1 probability for reg(a) would be just 1 * 1 (excluding -/// probabilty 1/3 of not selecting for Use1). +/// probability 1/3 of not selecting for Use1). 
/// Use1: /// reg(a) + reg({0,+,1}) 1 + 1/3 -- to be deleted /// reg(a) + reg({-1,+,1}) + 1 1 + 4/9 -- to be deleted diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp --- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp +++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp @@ -487,7 +487,7 @@ // In various bits below, we rely on the fact that uses are reachable from // defs. When there are basic blocks unreachable from the entry, dominance - // and reachablity queries return non-sensical results. Thus, we preprocess + // and reachability queries return non-sensical results. Thus, we preprocess // the function to ensure these properties hold. Modified |= removeUnreachableBlocks(F); diff --git a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp --- a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp +++ b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp @@ -29,7 +29,7 @@ namespace llvm { cl::opt ShouldPreserveAllAttributes( "assume-preserve-all", cl::init(false), cl::Hidden, - cl::desc("enable preservation of all attrbitues. even those that are " + cl::desc("enable preservation of all attributes. even those that are " "unlikely to be usefull")); cl::opt EnableKnowledgeRetention( diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -1845,8 +1845,9 @@ }); LLVM_DEBUG(if (verifyFunction(*oldFunction)) report_fatal_error("verification of oldFunction failed!")); - LLVM_DEBUG(if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC)) - report_fatal_error("Stale Asumption cache for old Function!")); + LLVM_DEBUG( + if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC)) + report_fatal_error("Stale Assumption cache for old Function!")); return newFunction; } diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp --- a/llvm/lib/Transforms/Utils/LoopPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp @@ -310,7 +310,7 @@ continue; // If not, give up. // However, for equality comparisons, that isn't always sufficient to - // eliminate the comparsion in loop body, we may need to peel one more + // eliminate the comparison in loop body, we may need to peel one more // iteration. See if that makes !Pred become unknown again. if (ICmpInst::isEquality(Pred) && !SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), NextIterVal, @@ -330,7 +330,7 @@ /// This "heuristic" exactly matches implicit behavior which used to exist /// inside getLoopEstimatedTripCount. It was added here to keep an -/// improvement inside that API from causing peeling to become more agressive. +/// improvement inside that API from causing peeling to become more aggressive. /// This should probably be removed. static bool violatesLegacyMultiExitLoopCheck(Loop *L) { BasicBlock *Latch = L->getLoopLatch(); diff --git a/llvm/lib/Transforms/Utils/SymbolRewriter.cpp b/llvm/lib/Transforms/Utils/SymbolRewriter.cpp --- a/llvm/lib/Transforms/Utils/SymbolRewriter.cpp +++ b/llvm/lib/Transforms/Utils/SymbolRewriter.cpp @@ -40,7 +40,7 @@ // // Note that source and exactly one of [Target, Transform] must be provided // -// New rewrite descriptors can be created. Addding a new rewrite descriptor +// New rewrite descriptors can be created. 
Adding a new rewrite descriptor // involves: // // a) extended the rewrite descriptor kind enumeration diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -704,7 +704,7 @@ /// The legality analysis. LoopVectorizationLegality *Legal; - /// The profitablity analysis. + /// The profitability analysis. LoopVectorizationCostModel *Cost; // Record whether runtime checks are added. diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -9398,7 +9398,7 @@ }; // Any instruction which isn't safe to speculate at the begining of the - // block is control dependend on any early exit or non-willreturn call + // block is control dependent on any early exit or non-willreturn call // which proceeds it. if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) { for (Instruction *I = BundleMember->Inst->getNextNode(); @@ -9426,7 +9426,7 @@ if (match(I, m_Intrinsic()) || match(I, m_Intrinsic())) // Any allocas past here must be control dependent on I, and I - // must be memory dependend on BundleMember->Inst. + // must be memory dependent on BundleMember->Inst. break; if (!isa(I)) diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h --- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h +++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h @@ -34,7 +34,7 @@ /// The legality analysis. LoopVectorizationLegality *Legal; - /// The profitablity analysis. + /// The profitability analysis. LoopVectorizationCostModel &CM; PredicatedScalarEvolution &PSE; diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -1499,7 +1499,7 @@ /// a vector operand into a scalar value, and adding the result to a chain. /// The Operands are {ChainOp, VecOp, [Condition]}. class VPReductionRecipe : public VPRecipeBase, public VPValue { - /// The recurrence decriptor for the reduction in question. + /// The recurrence descriptor for the reduction in question. const RecurrenceDescriptor *RdxDesc; /// Pointer to the TTI, needed to create the target reduction const TargetTransformInfo *TTI; diff --git a/llvm/test/Analysis/BasicAA/modref.ll b/llvm/test/Analysis/BasicAA/modref.ll --- a/llvm/test/Analysis/BasicAA/modref.ll +++ b/llvm/test/Analysis/BasicAA/modref.ll @@ -194,7 +194,7 @@ ret i32 %Diff } -;; In this case load can *not* be removed. Function clobers only %P2 but it may +;; In this case load can *not* be removed. Function clobbers only %P2 but it may ;; alias with %P. 
define i32 @test10(i32* %P, i32* %P2) { ; CHECK-LABEL: @test10( diff --git a/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll b/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll --- a/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll +++ b/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -disable-output -passes="print" 2>&1 | FileCheck %s -; Note: exact results can be achived even if +; Note: exact results can be achieved even if ; "-da-disable-delinearization-checks" is not used ; CHECK-LABEL: t1 diff --git a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll --- a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll @@ -1,10 +1,10 @@ ; RUN: opt -passes='print-access-info' -disable-output < %s 2>&1 | FileCheck %s -; Check that loop-indepedent forward dependences are discovered properly. +; Check that loop-independent forward dependences are discovered properly. ; ; FIXME: This does not actually always work which is pretty confusing. Right -; now there is hack in LAA that tries to figure out loop-indepedent forward -; dependeces *outside* of the MemoryDepChecker logic (i.e. proper dependence +; now there is hack in LAA that tries to figure out loop-independent forward +; dependencies *outside* of the MemoryDepChecker logic (i.e. proper dependence ; analysis). ; ; Therefore if there is only loop-independent dependences for an array diff --git a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll --- a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll @@ -108,7 +108,7 @@ br i1 %cmp, label %for.body, label %for.cond.cleanup } -; Following cases are unsafe depdences and are not vectorizable. +; Following cases are unsafe dependencies and are not vectorizable. ; void unsafe_Read_Write(int *A) { ; for (unsigned i = 0; i < 1024; i+=3) diff --git a/llvm/test/BugPoint/metadata.ll b/llvm/test/BugPoint/metadata.ll --- a/llvm/test/BugPoint/metadata.ll +++ b/llvm/test/BugPoint/metadata.ll @@ -8,7 +8,7 @@ ; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%pluginext %s -output-prefix %t-notype -bugpoint-crashcalls -silence-passes -disable-namedmd-remove -disable-strip-debuginfo > /dev/null ; RUN: llvm-dis %t-notype-reduced-simplified.bc -o - | FileCheck %s --check-prefix=NOTYPE ; -; Bugpoint can drop the metadata on the call, as it does not contrinute to the crash. +; Bugpoint can drop the metadata on the call, as it does not contribute to the crash. ; CHECK: call void @foo() ; NODEBUG: call void @foo() diff --git a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll --- a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll +++ b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll @@ -272,7 +272,7 @@ } ;; All non-aggregate fields must have the same type, all through the -;; overall aggreagate. This is false here because of the i32. +;; overall aggregate. This is false here because of the i32. 
%T_NESTED_STRUCT_DIFFM = type { [ 1 x { { double, double } } ], [ 1 x { { double, i32 } } ] diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll --- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -1,6 +1,6 @@ ; REQUIRES: asserts ; -; The Cortext-A57 machine model will avoid scheduling load instructions in +; The Cortex-A57 machine model will avoid scheduling load instructions in ; succession because loads on the A57 have a latency of 4 cycles and they all ; issue to the same pipeline. Instead, it will move other instructions between ; the loads to avoid unnecessary stalls. The generic machine model schedules 4 diff --git a/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir b/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir --- a/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir +++ b/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir @@ -1,6 +1,6 @@ # RUN: llc -o - %s -mtriple=aarch64--linux-gnu -mcpu=falkor -run-pass=aarch64-ccmp | FileCheck %s --- -# This test checks that successor probabilties are properly updated after a +# This test checks that successor probabilities are properly updated after a # ccmp-conversion. # # CHECK-LABEL: name: aarch64-ccmp-successor-probs diff --git a/llvm/test/CodeGen/AArch64/ifcvt-select.ll b/llvm/test/CodeGen/AArch64/ifcvt-select.ll --- a/llvm/test/CodeGen/AArch64/ifcvt-select.ll +++ b/llvm/test/CodeGen/AArch64/ifcvt-select.ll @@ -1,5 +1,5 @@ ; RUN: llc -mtriple=arm64-apple-ios -mcpu=cyclone < %s | FileCheck %s -; Do not generate redundant select in early if-converstion pass. +; Do not generate redundant select in early if-conversion pass. define i32 @foo(i32 %a, i32 %b) { entry: diff --git a/llvm/test/CodeGen/AArch64/swift-async-win.ll b/llvm/test/CodeGen/AArch64/swift-async-win.ll --- a/llvm/test/CodeGen/AArch64/swift-async-win.ll +++ b/llvm/test/CodeGen/AArch64/swift-async-win.ll @@ -37,7 +37,7 @@ } ; NOTE: we do not see the canonical windows frame setup due to the `nounwind` -; attribtue on the function. +; attribute on the function. ; CHECK: sub sp, sp, #64 ; CHECK: stp x30, x29, [sp, #16] diff --git a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll --- a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll +++ b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll @@ -37,7 +37,7 @@ ; GISEL: STRXui %{{.*}}, %fixed-stack.0 ; Make sure that there is an dependence edge between fi#-2 and fi#-4. -; Without this edge the scheduler would be free to move the store accross the load. +; Without this edge the scheduler would be free to move the store across the load. 
; COMMON: {{^SU(.*)}}: [[VRB]]:gpr64 = LDRXui %fixed-stack.2 ; COMMON-NOT: {{^SU(.*)}}: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll @@ -18,7 +18,7 @@ ret void } -; Check flags are preserved for an arbitrarry target intrinsic +; Check flags are preserved for an arbitrary target intrinsic ; CHECK-LABEL: name: rcp_nsz ; CHECK: = nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %{{[0-9]+}}(s32) define amdgpu_kernel void @rcp_nsz(float %arg0) { diff --git a/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir --- a/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir +++ b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir @@ -1,7 +1,7 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=si-lower-control-flow -amdgpu-remove-redundant-endcf %s -o - | FileCheck -check-prefix=GCN %s -# Make sure dbg_value doesn't change codeegn when collapsing end_cf +# Make sure dbg_value doesn't change codegen when collapsing end_cf --- name: simple_nested_if_dbg_value tracksRegLiveness: true diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll --- a/llvm/test/CodeGen/AMDGPU/idot4u.ll +++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll @@ -886,7 +886,7 @@ ret void } -; TODO: Support commutation accross the adds. +; TODO: Support commutation across the adds. define amdgpu_kernel void @udot4_CommutationAccrossMADs(<4 x i8> addrspace(1)* %src1, ; GFX7-LABEL: udot4_CommutationAccrossMADs: ; GFX7: ; %bb.0: ; %entry diff --git a/llvm/test/CodeGen/AMDGPU/idot8u.ll b/llvm/test/CodeGen/AMDGPU/idot8u.ll --- a/llvm/test/CodeGen/AMDGPU/idot8u.ll +++ b/llvm/test/CodeGen/AMDGPU/idot8u.ll @@ -2811,7 +2811,7 @@ ret void } -; TODO: Once the adictional "and+add" are removed, the pattern will be recognized. +; TODO: Once the additional "and+add" are removed, the pattern will be recognized. define amdgpu_kernel void @udot8_acc4_vecMul(<8 x i4> addrspace(1)* %src1, ; GFX7-LABEL: udot8_acc4_vecMul: ; GFX7: ; %bb.0: ; %entry diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll --- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll +++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll @@ -422,7 +422,7 @@ ret void } -; offset puts outside of superegister bounaries, so clamp to 1st element. +; offset puts outside of superregister boundaries, so clamp to 1st element. ; GCN-LABEL: {{^}}extract_largest_inbounds_offset: ; GCN-DAG: buffer_load_dwordx4 v[[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]] ; GCN-DAG: s_load_dword [[IDX0:s[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir b/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir --- a/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir +++ b/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir @@ -195,7 +195,7 @@ $vgpr0 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec ... -# There's no need to encode the VALU depdendency because it will complete before +# There's no need to encode the VALU dependency because it will complete before # the TRANS.
--- name: trans32_dep_1_only diff --git a/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll b/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll --- a/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll +++ b/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll @@ -8,7 +8,7 @@ ; The machine verifier complains about usage of register ; which is marked as killed in previous instruction. ; This happens due to when register allocator is out of registers -; it takes the first avialable register. +; it takes the first available register. ; CHECK: error: ran out of registers during register allocation ; CHECK: Bad machine code: Using an undefined physical register diff --git a/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll b/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll --- a/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll +++ b/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll @@ -19,7 +19,7 @@ ret void } -; Test for a crach in SIInstrInfo::areLoadsFromSameBasePtr() when checking +; Test for a crash in SIInstrInfo::areLoadsFromSameBasePtr() when checking ; an MUBUF load which does not have a vaddr operand. ; FUNC-LABEL: {{^}}same_base_ptr_crash: ; SI: buffer_load_dword diff --git a/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll b/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll --- a/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll +++ b/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll @@ -4,7 +4,7 @@ ; When a frame index offset is more than 12-bits, make sure we don't store ; it in mubuf's offset field. -; Also, make sure we use the same register for storing the scratch buffer addresss +; Also, make sure we use the same register for storing the scratch buffer address ; for both stores. This register is allocated by the register scavenger, so we ; should be able to reuse the same regiser for each scratch buffer access. diff --git a/llvm/test/CodeGen/AMDGPU/structurize1.ll b/llvm/test/CodeGen/AMDGPU/structurize1.ll --- a/llvm/test/CodeGen/AMDGPU/structurize1.ll +++ b/llvm/test/CodeGen/AMDGPU/structurize1.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=r600 -mcpu=redwood -r600-if-convert=0 < %s | FileCheck %s -; This tests for abug where the AMDILCFGStructurizer was crashing on loops +; This tests for a bug where the AMDILCFGStructurizer was crashing on loops ; like this: ; ; for (i = 0; i < x; i++) { diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll --- a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll +++ b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll @@ -216,7 +216,7 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp, float %bias, float %zcompare, float %s, float %t, float %clamp) { ; The vgpr tuple8 operand in image_gather4_c_b_cl instruction needs to be preserved -; across the call and should get allcoated to 8 CSRs. +; across the call and should get allocated to 8 CSRs. ; Only the lower 5 sub-registers of the tuple are preserved. ; The upper 3 sub-registers are unused. ; GFX9-LABEL: call_preserved_vgpr_tuple8: diff --git a/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll b/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll --- a/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll +++ b/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll @@ -2,7 +2,7 @@ ; This test is for a scheduler bug where VTX_READ instructions that used ; the result of another VTX_READ instruction were being grouped in the -; same fetch clasue. +; same fetch clause. 
; CHECK: {{^}}test: ; CHECK: Fetch clause diff --git a/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir b/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir --- a/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir +++ b/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir @@ -1,7 +1,7 @@ # RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s # TODO: Write this test. -# This instruction isn't expanded by the pseudo expansion passs, but +# This instruction isn't expanded by the pseudo expansion pass, but # rather AVRRegisterInfo::eliminateFrameIndex. --- | diff --git a/llvm/test/CodeGen/Hexagon/fp16.ll b/llvm/test/CodeGen/Hexagon/fp16.ll --- a/llvm/test/CodeGen/Hexagon/fp16.ll +++ b/llvm/test/CodeGen/Hexagon/fp16.ll @@ -6,7 +6,7 @@ ; (__extendhfsf2). ; The extension from fp16 to fp64 is implicitly handled by __extendhfsf2 and convert_sf2d. ; (fp16->fp32->fp64). -; Generate correcct libcall names for conversion from fp32/fp64 to fp16 +; Generate correct libcall names for conversion from fp32/fp64 to fp16 ; (__truncsfhf2 and __truncdfhf2) ; Verify that we generate loads and stores of halfword. diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -O2 -enable-pipeliner -disable-block-placement=0 < %s | FileCheck %s ; For the Phis generated in the epilog, test that we generate the correct -; names for the values coming from the prolog stages. The test belows +; names for the values coming from the prolog stages. The test below ; checks that the value loaded in the first prolog block gets propagated ; through the first epilog to the use after the loop. diff --git a/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll b/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll --- a/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll +++ b/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon -pipeliner-ignore-recmii -pipeliner-max-stages=2 -enable-pipeliner < %s -pipeliner-experimental-cg=true | FileCheck %s -; This is a loop we pipeline to three packets, though we could do bettter. +; This is a loop we pipeline to three packets, though we could do better. ; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: diff --git a/llvm/test/CodeGen/M68k/Arith/mul64.ll b/llvm/test/CodeGen/M68k/Arith/mul64.ll --- a/llvm/test/CodeGen/M68k/Arith/mul64.ll +++ b/llvm/test/CodeGen/M68k/Arith/mul64.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=m68k-linux -verify-machineinstrs | FileCheck %s -; Currenlty making the libcall is ok, x20 supports i32 mul/div which +; Currently making the libcall is ok, x20 supports i32 mul/div which ; yields saner expansion for i64 mul define i64 @foo(i64 %t, i64 %u) nounwind { ; CHECK-LABEL: foo: diff --git a/llvm/test/CodeGen/M68k/CollapseMOVEM.mir b/llvm/test/CodeGen/M68k/CollapseMOVEM.mir --- a/llvm/test/CodeGen/M68k/CollapseMOVEM.mir +++ b/llvm/test/CodeGen/M68k/CollapseMOVEM.mir @@ -4,7 +4,7 @@ #------------------------------------------------------------------------------ # CollapseMOVEM pass finds sequences of MOVEM instructions and collapse them -# into a single instruciton with merged masks. This only works with stack data +# into a single instruction with merged masks. 
This only works with stack data #------------------------------------------------------------------------------ --- # CollapseMOVEM_RM diff --git a/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll b/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll --- a/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll +++ b/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll @@ -40,7 +40,7 @@ ret void } -; If the SD nodes are not cleaup up correctly, then this can fail to compile +; If the SD nodes are not cleaned up correctly, then this can fail to compile ; with an error like: Cannot select: ch = setlt [ID=6] ; CHECK: @test1 diff --git a/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll b/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll --- a/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll +++ b/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll @@ -45,7 +45,7 @@ define dso_local void @caller_64_64_copy_ccc([8 x i64] %a, [8 x i64] %b) #1 { tail call fastcc void @callee_64_64_copy_fastcc([8 x i64] %a, [8 x i64] %b) ret void -; If caller and callee use different calling convensions, we cannot apply TCO. +; If caller and callee use different calling conventions, we cannot apply TCO. ; CHECK-SCO-LABEL: caller_64_64_copy_ccc: ; CHECK-SCO: bl callee_64_64_copy_fastcc } diff --git a/llvm/test/CodeGen/PowerPC/store_fptoi.ll b/llvm/test/CodeGen/PowerPC/store_fptoi.ll --- a/llvm/test/CodeGen/PowerPC/store_fptoi.ll +++ b/llvm/test/CodeGen/PowerPC/store_fptoi.ll @@ -5,7 +5,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK-PWR8 %s ; ========================================== -; Tests for store of fp_to_sint converstions +; Tests for store of fp_to_sint conversions ; ========================================== ; Function Attrs: norecurse nounwind @@ -589,7 +589,7 @@ } ; ========================================== -; Tests for store of fp_to_uint converstions +; Tests for store of fp_to_uint conversions ; ========================================== ; Function Attrs: norecurse nounwind diff --git a/llvm/test/CodeGen/PowerPC/test-vector-insert.ll b/llvm/test/CodeGen/PowerPC/test-vector-insert.ll --- a/llvm/test/CodeGen/PowerPC/test-vector-insert.ll +++ b/llvm/test/CodeGen/PowerPC/test-vector-insert.ll @@ -18,7 +18,7 @@ ; RUN: -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \ ; RUN: -mcpu=pwr9 < %s | FileCheck %s --check-prefix=CHECK-BE-P9 ; xscvdpsxws and xscvdpsxws is only available on Power7 and above -; Codgen is different for Power7, Power8, and Power9. +; Codegen is different for Power7, Power8, and Power9. define dso_local <4 x i32> @test(<4 x i32> %a, double %b) { ; CHECK-LE-P7-LABEL: test: diff --git a/llvm/test/CodeGen/RISCV/frm-dependency.ll b/llvm/test/CodeGen/RISCV/frm-dependency.ll --- a/llvm/test/CodeGen/RISCV/frm-dependency.ll +++ b/llvm/test/CodeGen/RISCV/frm-dependency.ll @@ -57,7 +57,7 @@ ret float %1 } -; This uses rtz instead of dyn rounding mode so shouldn't have an FRM dependncy. +; This uses rtz instead of dyn rounding mode so shouldn't have an FRM dependency. define i32 @fcvt_w_s(float %a) nounwind { ; RV32IF-LABEL: name: fcvt_w_s ; RV32IF: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/SystemZ/fp-const-10.ll b/llvm/test/CodeGen/SystemZ/fp-const-10.ll --- a/llvm/test/CodeGen/SystemZ/fp-const-10.ll +++ b/llvm/test/CodeGen/SystemZ/fp-const-10.ll @@ -3,7 +3,7 @@ ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s ; Test that we don't do an FP extending load, as this would result in a -; converstion to QNaN. +; conversion to QNaN. 
define double @f1() { ; CHECK-LABEL: .LCPI0_0 ; CHECK: .quad 0x7ff4000000000000 diff --git a/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll b/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll --- a/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll +++ b/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll @@ -1,6 +1,6 @@ ; RUN: llc < %s -O0 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 -mattr=+vfp2 ; This test creates a big stack frame without spilling any callee-saved registers. -; Make sure the whole stack frame is addrerssable wiothout scavenger crashes. +; Make sure the whole stack frame is addressable without scavenger crashes. target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32" target triple = "thumbv7-apple-darwin3.0.0-iphoneos" diff --git a/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll b/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll --- a/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll +++ b/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll @@ -4,7 +4,7 @@ define i64 @f1(i64 %a, i64 %b) { ; CHECK-LABEL: f1: ; CHECK: subs.w r0, r0, r2 -; To test dead_carry, +32bit prevents sbc conveting to 16-bit sbcs +; To test dead_carry, +32bit prevents sbc converting to 16-bit sbcs ; CHECK: sbc.w r1, r1, r3 %tmp = sub i64 %a, %b ret i64 %tmp diff --git a/llvm/test/CodeGen/WebAssembly/exception.ll b/llvm/test/CodeGen/WebAssembly/exception.ll --- a/llvm/test/CodeGen/WebAssembly/exception.ll +++ b/llvm/test/CodeGen/WebAssembly/exception.ll @@ -319,7 +319,7 @@ ret void } -; Tests a case when a cleanup region (cleanuppad ~ clanupret) contains another +; Tests a case when a cleanup region (cleanuppad ~ cleanupret) contains another ; catchpad define void @test_complex_cleanup_region() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { entry: diff --git a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll --- a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll +++ b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll @@ -4,7 +4,7 @@ ; ; It's possible to schedule this in 14 instructions by avoiding ; callee-save registers, but the scheduler isn't currently that -; conervative with registers. +; conservative with registers. @size20 = external dso_local global i32 ; [#uses=1] @in5 = external dso_local global ptr ; [#uses=1] diff --git a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll --- a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll +++ b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll @@ -10,7 +10,7 @@ ; ; Move return address (76(%esp)) to a temporary register (%ebp) ; CHECK: movl 76(%esp), [[REGISTER:%[a-z]+]] -; Overwrite return addresss +; Overwrite return address ; CHECK: movl [[EBX:%[a-z]+]], 76(%esp) ; Move return address from temporary register (%ebp) to new stack location (60(%esp)) ; CHECK: movl [[REGISTER]], 60(%esp) diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll --- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll @@ -1080,7 +1080,7 @@ } ; Test cases for . -; Instruction selection for broacast instruction fails if +; Instruction selection for broadcast instruction fails if ; the load cannot be folded into the broadcast.
; This happens if the load has initial one use but other uses are ; created later, or if selection DAG cannot prove that folding the diff --git a/llvm/test/CodeGen/X86/bitcnt-false-dep.ll b/llvm/test/CodeGen/X86/bitcnt-false-dep.ll --- a/llvm/test/CodeGen/X86/bitcnt-false-dep.ll +++ b/llvm/test/CodeGen/X86/bitcnt-false-dep.ll @@ -86,7 +86,7 @@ ;HSW: xorl [[GPR0:%e[a-d]x]], [[GPR0]] ;HSW-NEXT: tzcntl {{.*}}, [[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_tzct32 ;SKL-NOT: xor ;SKL: tzcntl @@ -113,7 +113,7 @@ ;HSW: xorl %e[[GPR0:[a-d]x]], %e[[GPR0]] ;HSW-NEXT: tzcntq {{.*}}, %r[[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_tzct64 ;SKL-NOT: xor ;SKL: tzcntq @@ -140,7 +140,7 @@ ;HSW: xorl [[GPR0:%e[a-d]x]], [[GPR0]] ;HSW-NEXT: lzcntl {{.*}}, [[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_lzct32 ;SKL-NOT: xor ;SKL: lzcntl @@ -167,7 +167,7 @@ ;HSW: xorl %e[[GPR0:[a-d]x]], %e[[GPR0]] ;HSW-NEXT: lzcntq {{.*}}, %r[[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_lzct64 ;SKL-NOT: xor ;SKL: lzcntq diff --git a/llvm/test/CodeGen/X86/block-placement.ll b/llvm/test/CodeGen/X86/block-placement.ll --- a/llvm/test/CodeGen/X86/block-placement.ll +++ b/llvm/test/CodeGen/X86/block-placement.ll @@ -1496,7 +1496,7 @@ ; if it introduces extra branch. ; Specifically in this case because best exit is .header ; but it has fallthrough to .middle block and last block in -; loop chain .slow does not have afallthrough to .header. +; loop chain .slow does not have a fallthrough to .header. ; CHECK-LABEL: not_rotate_if_extra_branch ; CHECK: %.entry ; CHECK: %.header @@ -1541,7 +1541,7 @@ define i32 @not_rotate_if_extra_branch_regression(i32 %count, i32 %init) { ; This is a regression test against patch avoid loop rotation if -; it introduce an extra btanch. +; it introduces an extra branch. ; CHECK-LABEL: not_rotate_if_extra_branch_regression ; CHECK: %.entry ; CHECK: %.first_backedge diff --git a/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll b/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll --- a/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll +++ b/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll @@ -25,7 +25,7 @@ ; v1 is now dead so we remove its live-range. ; Actually, we shrink it to empty to keep the ; instruction around for futher remat opportunities -; (accessbile via the origin pointer.) +; (accessible via the origin pointer.) ; ; Later v2 gets remove as well (e.g., because we ; remat it closer to its use) and the live-range diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll --- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll +++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll @@ -3,7 +3,7 @@ ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X)) -; Canolicalize the sequence shl/zext/lshr performing the zeroextend +; Canonicalize the sequence shl/zext/lshr performing the zeroextend ; as the last instruction of the sequence. ; This will help DAGCombiner to identify and then fold the sequence ; of shifts into a single AND.
diff --git a/llvm/test/CodeGen/X86/fptoui-may-overflow.ll b/llvm/test/CodeGen/X86/fptoui-may-overflow.ll --- a/llvm/test/CodeGen/X86/fptoui-may-overflow.ll +++ b/llvm/test/CodeGen/X86/fptoui-may-overflow.ll @@ -19,7 +19,7 @@ ret <16 x i8> %b } -; In @fptoui_shuffle, we must preserve the vpand for correctnesss. Only the +; In @fptoui_shuffle, we must preserve the vpand for correctness. Only the ; i8 values extracted from %s are poison. The values from the zeroinitializer ; are not. diff --git a/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll b/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll --- a/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll +++ b/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll @@ -18,7 +18,7 @@ ; Get p. ; CHECK-NEXT: movl _p@{{[0-9a-zA-Z]+}}, [[P_ADDR:%[a-z]+]] ; CHECK-NEXT: calll *([[P_ADDR]]) -; At this point eax contiains the address of p. +; At this point eax contains the address of p. ; Load c address. ; Make sure we do not clobber eax. ; CHECK-NEXT: movl [[C_SPILLED]], [[C_ADDR_RELOADED:%e[b-z]x+]] diff --git a/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll b/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll --- a/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll +++ b/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll @@ -4,7 +4,7 @@ ; regular function calls in the course of IR transformations. ; ; Test that the code generator will emit the function call and not consider it -; an "implausible instruciton". In the past this silently truncated code on +; an "implausible instruction". In the past this silently truncated code on ; exception paths and caused crashes at runtime. ; ; Reduced IR generated from ObjC++ source: diff --git a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll --- a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll +++ b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll @@ -4,7 +4,7 @@ ;; In functions with 'no_caller_saved_registers' attribute, all registers should ;; be preserved except for registers used for passing/returning arguments. ;; In the following function registers %rdi, %rsi and %xmm0 are used to store -;; arguments %a0, %a1 and %b0 accordingally. The value is returned in %rax. +;; arguments %a0, %a1 and %b0 accordingly. The value is returned in %rax. ;; The above registers should not be preserved, however other registers ;; (that are modified by the function) should be preserved (%rdx and %xmm1). define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 { diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll --- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll +++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll @@ -276,7 +276,7 @@ ; Check landing pad again. ; This time checks that we can shrink-wrap when the epilogue does not -; span accross several blocks. +; span across several blocks. ; ; CHECK-LABEL: with_nounwind_same_succ: ; diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll --- a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll +++ b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll @@ -1168,7 +1168,7 @@ @a = common global i32 0, align 4 ; Make sure the prologue does not clobber the EFLAGS when -; it is live accross. +; it is live across. ; PR25629. 
; Note: The registers may change in the following patterns, but ; because they imply register hierarchy (e.g., eax, al) this is diff --git a/llvm/test/DebugInfo/Generic/debug-info-enum.ll b/llvm/test/DebugInfo/Generic/debug-info-enum.ll --- a/llvm/test/DebugInfo/Generic/debug-info-enum.ll +++ b/llvm/test/DebugInfo/Generic/debug-info-enum.ll @@ -170,7 +170,7 @@ ; Test enumeration without a fixed underlying type, but with the DIFlagEnumClass ; set. The DW_AT_enum_class attribute should be absent. This behaviour is -; intented to keep compatibilty with existing DWARF consumers, which may imply +; intended to keep compatibility with existing DWARF consumers, which may imply ; the type is present whenever DW_AT_enum_class is set. !63 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E9", file: !3, line: 63, size: 32, flags: DIFlagEnumClass, elements: !64, identifier: "_ZTS2E9") !64 = !{!65, !66} diff --git a/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir b/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir --- a/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir +++ b/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir @@ -1,7 +1,7 @@ --- | ; RUN: llc -run-pass=machinelicm -o - %s | FileCheck %s ; Line numbers should not be retained when loop invariant instructions are hoisted. - ; Doing so causes poor stepping bevavior. + ; Doing so causes poor stepping behavior. ; ; Created from: ; int x; diff --git a/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll b/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll --- a/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll +++ b/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll @@ -26,7 +26,7 @@ ; CHECK-NEXT: COPY ; CHECK-NEXT: RET ; -;; For instr-ref, no copies should be considered. Because argumenst are +;; For instr-ref, no copies should be considered. Because arguments are ;; Special, we don't label them in the same way, and currently emit a ;; DBG_VALUE for the physreg. ; INSTRREF-LABEL: name: fn1 diff --git a/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll b/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll --- a/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll +++ b/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll @@ -8,7 +8,7 @@ ; real :: dummy ; end module external_module ; -; em.f90 (to compile: -g -llvm-emit -c -S em.f90) +; em.f90 (to compile: -g -llvm-emit -c -S em.f90) ; program use_external_module ; use external_module ; implicit none diff --git a/llvm/test/DebugInfo/macro_link.ll b/llvm/test/DebugInfo/macro_link.ll --- a/llvm/test/DebugInfo/macro_link.ll +++ b/llvm/test/DebugInfo/macro_link.ll @@ -1,6 +1,6 @@ ; RUN: llvm-link %s %s -S -o -| FileCheck %s -; This test checks that DIMacro and DIMacroFile comaprison works correctly. +; This test checks that DIMacro and DIMacroFile comparison works correctly.
; CHECK: !llvm.dbg.cu = !{[[CU1:![0-9]*]], [[CU2:![0-9]*]]} diff --git a/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll b/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll --- a/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll +++ b/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll @@ -31,7 +31,7 @@ ; CHECK: call void @__sanitizer_cov_trace_gep(i64 %idxprom) ; CHECK: ret void -; Just make sure we don't insturment this one and don't crash +; Just make sure we don't instrument this one and don't crash define void @gep_3(<2 x i8*> %a, i32 %i, i32 %j) { entry: %0 = getelementptr i8, <2 x i8*> %a, <2 x i64> diff --git a/llvm/test/MC/AArch64/arm64-branch-encoding.s b/llvm/test/MC/AArch64/arm64-branch-encoding.s --- a/llvm/test/MC/AArch64/arm64-branch-encoding.s +++ b/llvm/test/MC/AArch64/arm64-branch-encoding.s @@ -23,7 +23,7 @@ ; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_call26 ;----------------------------------------------------------------------------- -; Contitional branch instructions. +; Conditional branch instructions. ;----------------------------------------------------------------------------- b L1 diff --git a/llvm/test/MC/ARM/mul-v4.s b/llvm/test/MC/ARM/mul-v4.s --- a/llvm/test/MC/ARM/mul-v4.s +++ b/llvm/test/MC/ARM/mul-v4.s @@ -1,4 +1,4 @@ -@ PR17647: MUL/MLA/SMLAL/UMLAL should be avalaibe to IAS for ARMv4 and higher +@ PR17647: MUL/MLA/SMLAL/UMLAL should be available to IAS for ARMv4 and higher @ RUN: llvm-mc < %s -triple armv4-unknown-unknown -show-encoding | FileCheck %s --check-prefix=ARMV4 diff --git a/llvm/test/MC/AsmParser/directive_abort.s b/llvm/test/MC/AsmParser/directive_abort.s --- a/llvm/test/MC/AsmParser/directive_abort.s +++ b/llvm/test/MC/AsmParser/directive_abort.s @@ -1,6 +1,6 @@ # RUN: not llvm-mc -triple i386-unknown-unknown %s 2> %t # RUN: FileCheck -input-file %t %s -# CHECK: error: .abort 'please stop assembing' +# CHECK: error: .abort 'please stop assembling' TEST0: - .abort please stop assembing + .abort please stop assembling diff --git a/llvm/test/MC/ELF/section-sym.s b/llvm/test/MC/ELF/section-sym.s --- a/llvm/test/MC/ELF/section-sym.s +++ b/llvm/test/MC/ELF/section-sym.s @@ -7,7 +7,7 @@ // Test that the relocation points to the first section foo. 
-// The first seciton foo has index 6 +// The first section foo has index 6 // CHECK: Section { // CHECK: Index: 4 // CHECK-NEXT: Name: foo diff --git a/llvm/test/MC/ELF/section-unique-err4.s b/llvm/test/MC/ELF/section-unique-err4.s --- a/llvm/test/MC/ELF/section-unique-err4.s +++ b/llvm/test/MC/ELF/section-unique-err4.s @@ -1,5 +1,5 @@ // RUN: not llvm-mc -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s -// CHECK: error: expected commma +// CHECK: error: expected comma .section .text,"ax",@progbits,unique 1 diff --git a/llvm/test/MC/Hexagon/capitalizedEndloop.s b/llvm/test/MC/Hexagon/capitalizedEndloop.s --- a/llvm/test/MC/Hexagon/capitalizedEndloop.s +++ b/llvm/test/MC/Hexagon/capitalizedEndloop.s @@ -1,7 +1,7 @@ # RUN: llvm-mc -triple=hexagon -filetype=obj %s | llvm-objdump -d -r - | FileCheck %s # -# Verify that capitaizled endloops work +# Verify that capitalized endloops work { R0 = mpyi(R0,R0) } : endloop0 { R0 = mpyi(R0,R0) } : ENDLOOP0 diff --git a/llvm/test/MC/Mips/mt/module-directive.s b/llvm/test/MC/Mips/mt/module-directive.s --- a/llvm/test/MC/Mips/mt/module-directive.s +++ b/llvm/test/MC/Mips/mt/module-directive.s @@ -4,7 +4,7 @@ # RUN: FileCheck --check-prefix=CHECK-ASM %s # Test that the .module directive sets the MT flag in .MIPS.abiflags when -# assembling to boject files. +# assembling to object files. # Test that the .moodule directive is re-emitted when expanding assembly. diff --git a/llvm/test/MC/X86/align-via-relaxation.s b/llvm/test/MC/X86/align-via-relaxation.s --- a/llvm/test/MC/X86/align-via-relaxation.s +++ b/llvm/test/MC/X86/align-via-relaxation.s @@ -44,7 +44,7 @@ foo: ret - # Check that we're not shifting aroudn the offsets of labels - doing + # Check that we're not shifting around the offsets of labels - doing # that would require a further round of relaxation # CHECK: : # CHECK: 22: eb fe jmp 0x22 diff --git a/llvm/test/Object/archive-update.test b/llvm/test/Object/archive-update.test --- a/llvm/test/Object/archive-update.test +++ b/llvm/test/Object/archive-update.test @@ -17,7 +17,7 @@ RUN: echo newer > %t/tmp.newer/evenlen RUN: touch %t/tmp.newer/evenlen -Create an achive with the newest file +Create an archive with the newest file RUN: llvm-ar rU %t/tmp.a %t/tmp.newer/evenlen RUN: llvm-ar p %t/tmp.a | FileCheck --check-prefix=NEWER %s diff --git a/llvm/test/ThinLTO/X86/guid_collision.ll b/llvm/test/ThinLTO/X86/guid_collision.ll --- a/llvm/test/ThinLTO/X86/guid_collision.ll +++ b/llvm/test/ThinLTO/X86/guid_collision.ll @@ -1,5 +1,5 @@ ; Make sure LTO succeeds even if %t.bc contains a GlobalVariable F and -; %t2.bc cointains a Function F with the same GUID. +; %t2.bc contains a Function F with the same GUID. ; ; RUN: opt -module-summary %s -o %t.bc ; RUN: opt -module-summary %p/Inputs/guid_collision.ll -o %t2.bc diff --git a/llvm/test/Transforms/ArgumentPromotion/profile.ll b/llvm/test/Transforms/ArgumentPromotion/profile.ll --- a/llvm/test/Transforms/ArgumentPromotion/profile.ll +++ b/llvm/test/Transforms/ArgumentPromotion/profile.ll @@ -2,7 +2,7 @@ ; RUN: opt -passes=argpromotion,mem2reg -S < %s | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" -; Checks if !prof metadata is corret in deadargelim. +; Checks if !prof metadata is correct in deadargelim. 
define void @caller() #0 { ; CHECK-LABEL: define {{[^@]+}}@caller() { diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll --- a/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll +++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll @@ -5,7 +5,7 @@ ; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" -; Checks if !prof metadata is corret in deadargelim. +; Checks if !prof metadata is correct in deadargelim. define void @caller() #0 { ; IS________OPM-LABEL: define {{[^@]+}}@caller() { diff --git a/llvm/test/Transforms/Attributor/dereferenceable-1.ll b/llvm/test/Transforms/Attributor/dereferenceable-1.ll --- a/llvm/test/Transforms/Attributor/dereferenceable-1.ll +++ b/llvm/test/Transforms/Attributor/dereferenceable-1.ll @@ -310,7 +310,7 @@ } ; TEST 8 -; Use Constant range in deereferenceable +; Use Constant range in dereferenceable ; void g(int *p, long long int *range){ ; int r = *range ; // [10, 99] ; fill_range(p, *range); diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll --- a/llvm/test/Transforms/Attributor/liveness.ll +++ b/llvm/test/Transforms/Attributor/liveness.ll @@ -497,7 +497,7 @@ ret i32 0 } -; TEST 6: Undefined behvior, taken from LangRef. +; TEST 6: Undefined behavior, taken from LangRef. ; FIXME: Should be able to detect undefined behavior. define void @ub(i32* %0) { diff --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll --- a/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll +++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll @@ -1,4 +1,4 @@ -;; AArch64 is arbitralily chosen as a 32/64-bit RISC representative to show the transform in all tests. +;; AArch64 is arbitrarily chosen as a 32/64-bit RISC representative to show the transform in all tests. ; RUN: opt < %s -codegenprepare -S -mtriple=aarch64-unknown-unknown | FileCheck %s --check-prefix=ARM64 diff --git a/llvm/test/Transforms/DeadArgElim/aggregates.ll b/llvm/test/Transforms/DeadArgElim/aggregates.ll --- a/llvm/test/Transforms/DeadArgElim/aggregates.ll +++ b/llvm/test/Transforms/DeadArgElim/aggregates.ll @@ -131,7 +131,7 @@ ret i32 %ret } -; Case 6: When considering @mid, the return instruciton has sub-value 0 +; Case 6: When considering @mid, the return instruction has sub-value 0 ; unconditionally live, but 1 only conditionally live. Since at that level we're ; applying the results to the whole of %res, this means %res is live and cannot ; be reduced. There is scope for further optimisation here (though not visible diff --git a/llvm/test/Transforms/DeadArgElim/call_profile.ll b/llvm/test/Transforms/DeadArgElim/call_profile.ll --- a/llvm/test/Transforms/DeadArgElim/call_profile.ll +++ b/llvm/test/Transforms/DeadArgElim/call_profile.ll @@ -1,6 +1,6 @@ ; RUN: opt -passes=deadargelim -S < %s | FileCheck %s -; Checks if !prof metadata is corret in deadargelim. +; Checks if !prof metadata is correct in deadargelim. 
define void @caller() #0 { ; CHECK: call void @test_vararg(), !prof ![[PROF:[0-9]]] diff --git a/llvm/test/Transforms/EarlyCSE/fence.ll b/llvm/test/Transforms/EarlyCSE/fence.ll --- a/llvm/test/Transforms/EarlyCSE/fence.ll +++ b/llvm/test/Transforms/EarlyCSE/fence.ll @@ -56,7 +56,7 @@ ret i32 %res } -; We can not dead store eliminate accross the fence. We could in +; We can not dead store eliminate across the fence. We could in ; principal reorder the second store above the fence and then DSE either ; store, but this is beyond the simple last-store DSE which EarlyCSE ; implements. diff --git a/llvm/test/Transforms/EarlyCSE/invariant.start.ll b/llvm/test/Transforms/EarlyCSE/invariant.start.ll --- a/llvm/test/Transforms/EarlyCSE/invariant.start.ll +++ b/llvm/test/Transforms/EarlyCSE/invariant.start.ll @@ -575,7 +575,7 @@ ret i32 %sub } -; Invariant load defact starts an invariant.start scope of the appropriate size +; Invariant load defacto starts an invariant.start scope of the appropriate size define i32 @test_invariant_load_scope(ptr %p) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope ; NO_ASSUME-SAME: (ptr [[P:%.*]]) diff --git a/llvm/test/Transforms/GlobalOpt/invariant.group.ll b/llvm/test/Transforms/GlobalOpt/invariant.group.ll --- a/llvm/test/Transforms/GlobalOpt/invariant.group.ll +++ b/llvm/test/Transforms/GlobalOpt/invariant.group.ll @@ -45,7 +45,7 @@ ; %val = load i32, i32* %ptrVal, !invariant.group !0 ; into ; %val = load i32, i32* @tmp3, !invariant.group !0 -; and then we could assume that %val and %val2 to be the same, which coud be +; and then we could assume that %val and %val2 to be the same, which could be ; false, because @changeTmp3ValAndCallBarrierInside() may change the value ; of @tmp3. define void @_not_optimizable() { diff --git a/llvm/test/Transforms/GuardWidening/posion.ll b/llvm/test/Transforms/GuardWidening/posion.ll --- a/llvm/test/Transforms/GuardWidening/posion.ll +++ b/llvm/test/Transforms/GuardWidening/posion.ll @@ -9,7 +9,7 @@ ; interaction with poison values. ; Let x incoming parameter is used for rane checks. -; Test generates 5 checks. One of them (c2) is used to get the corretness +; Test generates 5 checks. One of them (c2) is used to get the correctness ; of nuw/nsw flags for x3 and x5. Others are used in guards and represent ; the checks x + 10 u< L, x + 15 u< L, x + 20 u< L and x + 3 u< L. ; The first two checks are in the first basic block and guard widening @@ -17,7 +17,7 @@ ; When c4 and c3 are considered, number of check becomes more than two ; and combineRangeCheck consider them as profitable even if they are in ; different basic blocks. -; Accoding to algorithm of combineRangeCheck it detects that c3 and c4 +; According to algorithm of combineRangeCheck it detects that c3 and c4 ; are enough to cover c1 and c5, so it ends up with guard of c3 && c4 ; while both of them are poison at entry. This is a bug. diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll b/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll --- a/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll +++ b/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll @@ -17,7 +17,7 @@ ;; In this example, the pointer IV is dynamicaly dead. As such, the fact that ;; inbounds produces poison *does not* trigger UB in the original loop. 
As ;; such, the pointer IV can be poison and adding a new use of the pointer -;; IV which dependends on that poison computation in a manner which might +;; IV which depends on that poison computation in a manner which might ;; trigger UB would be incorrect. ;; FIXME: This currently shows a miscompile! define void @neg_dynamically_dead_inbounds(i1 %always_false) #0 { diff --git a/llvm/test/Transforms/Inline/inline_call.ll b/llvm/test/Transforms/Inline/inline_call.ll --- a/llvm/test/Transforms/Inline/inline_call.ll +++ b/llvm/test/Transforms/Inline/inline_call.ll @@ -21,7 +21,7 @@ store i8* bitcast (void ()* @third to i8*), i8** %q, align 8 %tmp = call void (...)* @second(i8** %q) ; The call to 'wrapper' here is to ensure that its function attributes - ; i.e., returning its parameter and having no side effect, will be decuded + ; i.e., returning its parameter and having no side effect, will be deduced ; before the next round of inlining happens to 'top' to expose the bug. %call = call void (...)* @wrapper(void (...)* %tmp) ; The indirect call here is to confuse the alias analyzer so that diff --git a/llvm/test/Transforms/InstCombine/memchr-10.ll b/llvm/test/Transforms/InstCombine/memchr-10.ll --- a/llvm/test/Transforms/InstCombine/memchr-10.ll +++ b/llvm/test/Transforms/InstCombine/memchr-10.ll @@ -11,7 +11,7 @@ @a5 = constant [5 x i8] c"12345" -; Fold memchr(a5 + 5, c, 1) == a5 + 5 to an arbitrary constrant. +; Fold memchr(a5 + 5, c, 1) == a5 + 5 to an arbitrary constant. ; The call is transformed to a5[5] == c by the memchr simplifier, with ; a5[5] being indeterminate. The equality then is the folded with ; an undefined/arbitrary result. diff --git a/llvm/test/Transforms/InstCombine/zext.ll b/llvm/test/Transforms/InstCombine/zext.ll --- a/llvm/test/Transforms/InstCombine/zext.ll +++ b/llvm/test/Transforms/InstCombine/zext.ll @@ -124,7 +124,7 @@ ret i8 %5 } -; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded accross +; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across ; nested logical operators. define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) { diff --git a/llvm/test/Transforms/LICM/hoist-phi.ll b/llvm/test/Transforms/LICM/hoist-phi.ll --- a/llvm/test/Transforms/LICM/hoist-phi.ll +++ b/llvm/test/Transforms/LICM/hoist-phi.ll @@ -974,7 +974,7 @@ br label %loop } -; Check that we correctly handle the hoisting of %gep when theres a critical +; Check that we correctly handle the hoisting of %gep when there's a critical ; edge that branches to the preheader.
; CHECK-LABEL: @crit_edge define void @crit_edge(i32* %ptr, i32 %idx, i1 %cond1, i1 %cond2) { diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll @@ -26,7 +26,7 @@ ret void } -; Check adjiacent memory locations are properly matched and the +; Check adjacent memory locations are properly matched and the ; longest chain vectorized ; GCN-LABEL: @interleave_get_longest diff --git a/llvm/test/Transforms/LoopFusion/cannot_fuse.ll b/llvm/test/Transforms/LoopFusion/cannot_fuse.ll --- a/llvm/test/Transforms/LoopFusion/cannot_fuse.ll +++ b/llvm/test/Transforms/LoopFusion/cannot_fuse.ll @@ -76,7 +76,7 @@ ret void } -; Check that fusion detects the two canddates are not adjacent (the exit block +; Check that fusion detects the two candidates are not adjacent (the exit block ; of the first candidate is not the preheader of the second candidate). ; CHECK: Performing Loop Fusion on function non_adjacent diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll --- a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll @@ -18,7 +18,7 @@ ; REGS: getelementptr i32, i32* %lsr.iv4, i64 1 ; LLC checks that LSR prefers less instructions to less registers. -; LSR should prefer complicated address to additonal add instructions. +; LSR should prefer complicated address to additional add instructions. ; CHECK: LBB0_2: ; CHECK-NEXT: movl (%r{{.+}}, diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll b/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll --- a/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll @@ -1,6 +1,6 @@ ; RUN: opt -S -loop-reduce < %s | FileCheck %s ; -;This test produces zero factor that becomes a denumerator and fails an assetion. +;This test produces a zero factor that becomes a denominator and fails an assertion. target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll --- a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll +++ b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll @@ -5,7 +5,7 @@ ; 2) -unroll-dynamic-cost-savings-discount ; ; They control loop-unrolling according to the following rules: -; * If size of unrolled loop exceeds the absoulte threshold, we don't unroll +; * If size of unrolled loop exceeds the absolute threshold, we don't unroll ; this loop under any circumstances. ; * If size of unrolled loop is below the '-unroll-threshold', then we'll ; consider this loop as a very small one, and completely unroll it.
diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll --- a/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll @@ -5,7 +5,7 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" -; Loop peeling must result in valid scope declartions +; Loop peeling must result in valid scope declarations define internal fastcc void @test01(i8* %p0, i8* %p1, i8* %p2) unnamed_addr align 2 { ; CHECK-LABEL: @test01( diff --git a/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll b/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll --- a/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll +++ b/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll @@ -703,7 +703,7 @@ ; Has a positive dependency between two stores. Still valid. -; The negative dependecy is in unroll-and-jam-disabled.ll +; The negative dependency is in unroll-and-jam-disabled.ll define void @test7(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 { ; CHECK-LABEL: @test7( ; CHECK-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll --- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -1321,7 +1321,7 @@ ; ; for (int i=0; i<10000; i += 16) { ; if (trigger[i] < 100) { -; A[i] = B[i*2] + trigger[i]; << non-cosecutive access +; A[i] = B[i*2] + trigger[i]; << non-consecutive access ; } ; } ;} diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll --- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll @@ -886,7 +886,7 @@ -; Unconditioal accesses with gaps under Optsize scenario again, with unknown +; Unconditional accesses with gaps under Optsize scenario again, with unknown ; trip-count this time, in order to check the behavior of folding-the-tail ; (folding the remainder loop into the main loop using masking) together with ; interleaved-groups. Folding-the-tail turns the accesses to conditional which diff --git a/llvm/test/Transforms/LoopVectorize/skip-iterations.ll b/llvm/test/Transforms/LoopVectorize/skip-iterations.ll --- a/llvm/test/Transforms/LoopVectorize/skip-iterations.ll +++ b/llvm/test/Transforms/LoopVectorize/skip-iterations.ll @@ -8,7 +8,7 @@ ; safely speculating that the widened load of A[i] should not fault if the ; scalarized loop does not fault. For example, the ; original load in the scalar loop may not fault, but the last iteration of the -; vectorized load can fault (if it crosses a page boudary for example). +; vectorized load can fault (if it crosses a page boundary for example). ; This last vector iteration is where *one* of the ; scalar iterations lead to the early exit. diff --git a/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll b/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll --- a/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll @@ -5,8 +5,8 @@ ; vectorized. 
These conditions include following: ; * Inner and outer loop invariant select condition ; * Select condition depending on outer loop iteration variable. -; * Select condidition depending on inner loop iteration variable. -; * Select conditition depending on both outer and inner loop iteration +; * Select condition depending on inner loop iteration variable. +; * Select condition depending on both outer and inner loop iteration ; variables. define void @loop_invariant_select(double* noalias nocapture %out, i1 %select, double %a, double %b) { diff --git a/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll b/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll --- a/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll +++ b/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll @@ -48,7 +48,7 @@ ; to the actual function, not jump table ; CHECK-NEXT: call void @internal_hidden_def.cfi() -; dso_local function with defailt visibility can be short-circuited +; dso_local function with default visibility can be short-circuited ; CHECK-NEXT: call void @dsolocal_default_def.cfi() ; Local call - no action diff --git a/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll b/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll --- a/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll +++ b/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll @@ -1,5 +1,5 @@ ; RUN: opt -mergefunc -disable-output < %s -; This used to cause a crash when compairing the GEPs +; This used to cause a crash when comparing the GEPs define void @foo(<2 x i64*>) { %tmp = getelementptr i64, <2 x i64*> %0, <2 x i64> diff --git a/llvm/test/Transforms/Reassociate/xor_reassoc.ll b/llvm/test/Transforms/Reassociate/xor_reassoc.ll --- a/llvm/test/Transforms/Reassociate/xor_reassoc.ll +++ b/llvm/test/Transforms/Reassociate/xor_reassoc.ll @@ -301,7 +301,7 @@ } ; The bug was that when the compiler optimize "(x | c1)" ^ "(x & c2)", it may -; swap the two xor-subexpressions if they are not in canoninical order; however, +; swap the two xor-subexpressions if they are not in canonical order; however, ; when optimizer swaps two sub-expressions, if forgot to swap the cached value ; of c1 and c2 accordingly, hence cause the problem. ; diff --git a/llvm/test/Transforms/SROA/preserve-nonnull.ll b/llvm/test/Transforms/SROA/preserve-nonnull.ll --- a/llvm/test/Transforms/SROA/preserve-nonnull.ll +++ b/llvm/test/Transforms/SROA/preserve-nonnull.ll @@ -38,7 +38,7 @@ ; Make sure we properly handle the !nonnull attribute when we convert ; a pointer load to an integer load. -; FIXME: While this doesn't do anythnig actively harmful today, it really +; FIXME: While this doesn't do anything actively harmful today, it really ; should propagate the !nonnull metadata to range metadata. The irony is, it ; *does* initially, but then we lose that !range metadata before we finish ; SROA. diff --git a/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll b/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll --- a/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll +++ b/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll @@ -73,7 +73,7 @@ !15 = !DILocation(line: 6, scope: !12) -;; Check the profile of funciton sum is only merged once though the original callsite is replicted. +;; Check the profile of function sum is only merged once though the original callsite is replicated.
; CHECK: name: "sum" ; CHECK-NEXT: {!"function_entry_count", i64 46} ; CHECK: !{!"branch_weights", i32 11, i32 37} diff --git a/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll b/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll --- a/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll +++ b/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S -hoist-common-insts=true | FileCheck %s -; TODO: Track the acutal DebugLoc of the hoisted instruction when no-line +; TODO: Track the actual DebugLoc of the hoisted instruction when no-line ; DebugLoc is supported (https://reviews.llvm.org/D24180) ; Checks if the debug info for hoisted "x = i" is removed and diff --git a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll --- a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll +++ b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll @@ -362,7 +362,7 @@ ret void } -; Test edge splitting when the default target has icmp and unconditinal +; Test edge splitting when the default target has icmp and unconditional ; branch define i1 @test9(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: @test9( diff --git a/llvm/test/Transforms/SimplifyCFG/switch_thread.ll b/llvm/test/Transforms/SimplifyCFG/switch_thread.ll --- a/llvm/test/Transforms/SimplifyCFG/switch_thread.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch_thread.ll @@ -98,7 +98,7 @@ i32 17, label %C i32 42, label %D ] -;; unreacahble. +;; unreachable. C: ; preds = %A, %A call void @DEAD( ) ret void diff --git a/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test b/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test --- a/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test +++ b/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test @@ -10,7 +10,7 @@ following aspects: - The padding of executable sections (lld uses 0xcc, which is int3 on x86) - The actual layout of the string table (it can be filled linearly, - strings can be dedupliated, the table can be optimized by sharing tails + strings can be deduplicated, the table can be optimized by sharing tails of longer strings; different parts in llvm do each of these three options) - The size indication for an empty/missing string table can either be 4 or left out altogether diff --git a/llvm/test/tools/llvm-objcopy/COFF/remove-section.test b/llvm/test/tools/llvm-objcopy/COFF/remove-section.test --- a/llvm/test/tools/llvm-objcopy/COFF/remove-section.test +++ b/llvm/test/tools/llvm-objcopy/COFF/remove-section.test @@ -46,7 +46,7 @@ # # Removing the .bss section removes one symbol and its aux symbol, # and updates the section indices in symbols pointing to later -# symbols, including the aux section defintitions. +# symbols, including the aux section definitions. # # Testing that the absolute symbol @feat.00 survives the section number # mangling. diff --git a/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test b/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test --- a/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test +++ b/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test @@ -124,7 +124,7 @@ Content: "0041004200" ## '\0', 'A', '\0', 'B', '\0' Symbols: - StName: 1 ## 'A' - - StName: 0xFF ## An arbitrary currupted index in the string table. + - StName: 0xFF ## An arbitrary corrupted index in the string table. - StName: 3 ## 'B' ## Check we report a warning when a relocation section is not present. 
diff --git a/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test b/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test --- a/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test +++ b/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test @@ -15,7 +15,7 @@ Data: ELFDATA2LSB Type: ET_DYN -## Check that we dump all possbile dynamic relocation sections. +## Check that we dump all possible dynamic relocation sections. # RUN: yaml2obj --docnum=2 %s -o %t2.1 # RUN: llvm-readobj --dyn-relocations %t2.1 2>&1 | \ # RUN: FileCheck %s --implicit-check-not=warning: --check-prefix=LLVM-RELOCS diff --git a/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test b/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test --- a/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test +++ b/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test @@ -76,7 +76,7 @@ 0xDD, 0xEE, 0xFF, 0x1E, ## ODK_REGINFO: bit-mask of used co-processor registers (2). 0x2E, 0x3E, 0x4E, 0x5E, ## ODK_REGINFO: bit-mask of used co-processor registers (3). 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, ## ODK_REGINFO: gp register value. -## A descriptor for one more arbirtary supported option. +## A descriptor for one more arbitrary supported option. 0x1, 0x28, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, diff --git a/llvm/test/tools/obj2yaml/ELF/offset.yaml b/llvm/test/tools/obj2yaml/ELF/offset.yaml --- a/llvm/test/tools/obj2yaml/ELF/offset.yaml +++ b/llvm/test/tools/obj2yaml/ELF/offset.yaml @@ -1,5 +1,5 @@ ## Check how the "Offset" field is dumped by obj2yaml. -## For each section we calulate the expected offset. +## For each section we calculate the expected offset. ## When it does not match the actual offset, we emit the "Offset" key. # RUN: yaml2obj %s -o %t1.o diff --git a/llvm/tools/llvm-ar/llvm-ar.cpp b/llvm/tools/llvm-ar/llvm-ar.cpp --- a/llvm/tools/llvm-ar/llvm-ar.cpp +++ b/llvm/tools/llvm-ar/llvm-ar.cpp @@ -1325,7 +1325,7 @@ cl::ExpandResponseFiles(Saver, getRspQuoting(makeArrayRef(argv, argc)), Argv); - // Get BitMode from enviorment variable "OBJECT_MODE" for AIX OS, if + // Get BitMode from environment variable "OBJECT_MODE" for AIX OS, if // specified. if (object::Archive::getDefaultKindForHost() == object::Archive::K_AIXBIG) { BitMode = getBitMode(getenv("OBJECT_MODE")); diff --git a/llvm/tools/llvm-cov/CoverageReport.cpp b/llvm/tools/llvm-cov/CoverageReport.cpp --- a/llvm/tools/llvm-cov/CoverageReport.cpp +++ b/llvm/tools/llvm-cov/CoverageReport.cpp @@ -108,7 +108,7 @@ OS << '-'; } -/// Return the color which correponds to the coverage percentage of a +/// Return the color which corresponds to the coverage percentage of a /// certain metric. template raw_ostream::Colors determineCoveragePercentageColor(const T &Info) { diff --git a/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp b/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp --- a/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp +++ b/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp @@ -276,7 +276,7 @@ DebugInfoLinker.setUpdate(!Options.DoGarbageCollection); std::vector> ObjectsForLinking(1); - std::vector> AddresssMapForLinking(1); + std::vector> AddressMapForLinking(1); std::vector EmptyWarnings; std::unique_ptr Context = DWARFContext::create(File); @@ -292,12 +292,12 @@ } // Add object files to the DWARFLinker. 
- AddresssMapForLinking[0] = + AddressMapForLinking[0] = std::make_unique(*Context, Options, File); - ObjectsForLinking[0] = std::make_unique( - File.getFileName(), &*Context, AddresssMapForLinking[0].get(), - EmptyWarnings); + ObjectsForLinking[0] = + std::make_unique(File.getFileName(), &*Context, + AddressMapForLinking[0].get(), EmptyWarnings); for (size_t I = 0; I < ObjectsForLinking.size(); I++) DebugInfoLinker.addObjectFile(*ObjectsForLinking[I]); diff --git a/llvm/tools/llvm-exegesis/lib/Analysis.cpp b/llvm/tools/llvm-exegesis/lib/Analysis.cpp --- a/llvm/tools/llvm-exegesis/lib/Analysis.cpp +++ b/llvm/tools/llvm-exegesis/lib/Analysis.cpp @@ -394,7 +394,7 @@ SubtargetInfo_->getWriteLatencyEntry(RSC.SCDesc, I); OS << "
  • " << Entry->Cycles; if (RSC.SCDesc->NumWriteLatencyEntries > 1) { - // Dismabiguate if more than 1 latency. + // Disambiguate if more than 1 latency. OS << " (WriteResourceID " << Entry->WriteResourceID << ")"; } OS << "
  • "; diff --git a/llvm/tools/llvm-objdump/MachODump.cpp b/llvm/tools/llvm-objdump/MachODump.cpp --- a/llvm/tools/llvm-objdump/MachODump.cpp +++ b/llvm/tools/llvm-objdump/MachODump.cpp @@ -3404,7 +3404,7 @@ // These are structs in the Objective-C meta data and read to produce the // comments for disassembly. While these are part of the ABI they are no -// public defintions. So the are here not in include/llvm/BinaryFormat/MachO.h +// public definitions. So the are here not in include/llvm/BinaryFormat/MachO.h // . // The cfstring object in a 64-bit Mach-O file. diff --git a/llvm/tools/llvm-objdump/OtoolOpts.td b/llvm/tools/llvm-objdump/OtoolOpts.td --- a/llvm/tools/llvm-objdump/OtoolOpts.td +++ b/llvm/tools/llvm-objdump/OtoolOpts.td @@ -14,7 +14,7 @@ def h : Flag<["-"], "h">, HelpText<"print mach header">; def I : Flag<["-"], "I">, HelpText<"print indirect symbol table">; def j : Flag<["-"], "j">, HelpText<"print opcode bytes">; -def l : Flag<["-"], "l">, HelpText<"print load commnads">; +def l : Flag<["-"], "l">, HelpText<"print load commands">; def L : Flag<["-"], "L">, HelpText<"print used shared libraries">; def mcpu_EQ : Joined<["-"], "mcpu=">, HelpText<"select cpu for disassembly">; def o : Flag<["-"], "o">, HelpText<"print Objective-C segment">; diff --git a/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp b/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp --- a/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp +++ b/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp @@ -527,7 +527,7 @@ cl::opt DumpTypeDependents( "dependents", - cl::desc("In conjunection with -type-index and -id-index, dumps the entire " + cl::desc("In conjunction with -type-index and -id-index, dumps the entire " "dependency graph for the specified index instead of " "just the single record with the specified index"), cl::cat(TypeOptions), cl::sub(DumpSubcommand)); diff --git a/llvm/tools/llvm-profgen/PerfReader.cpp b/llvm/tools/llvm-profgen/PerfReader.cpp --- a/llvm/tools/llvm-profgen/PerfReader.cpp +++ b/llvm/tools/llvm-profgen/PerfReader.cpp @@ -1170,7 +1170,7 @@ emitWarningSummary( BogusRange, TotalRangeNum, "of samples are from ranges that have range start after or too far from " - "range end acrossing the unconditinal jmp."); + "range end across the unconditional jmp."); } void PerfScriptReader::parsePerfTraces() { diff --git a/llvm/tools/llvm-profgen/ProfiledBinary.h b/llvm/tools/llvm-profgen/ProfiledBinary.h --- a/llvm/tools/llvm-profgen/ProfiledBinary.h +++ b/llvm/tools/llvm-profgen/ProfiledBinary.h @@ -149,7 +149,7 @@ // size with the best matching context, which is used to help pre-inliner use // accurate post-optimization size to make decisions. // TODO: If an inlinee is completely optimized away, ideally we should have zero -// for its context size, currently we would misss such context since it doesn't +// for its context size, currently we would miss such context since it doesn't // have instructions. To fix this, we need to mark all inlinee with entry probe // but without instructions as having zero size. class BinarySizeContextTracker { diff --git a/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp b/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp --- a/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp +++ b/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp @@ -289,7 +289,7 @@ SymbolizerOptTable &Tbl) { StringRef ToolName = IsAddr2Line ? "llvm-addr2line" : "llvm-symbolizer"; // The environment variable specifies initial options which can be overridden - // by commnad line options. + // by command line options. 
Tbl.setInitialOptionsFromEnvironment(IsAddr2Line ? "LLVM_ADDR2LINE_OPTS" : "LLVM_SYMBOLIZER_OPTS"); bool HasError = false; diff --git a/llvm/tools/llvm-xray/xray-account.cpp b/llvm/tools/llvm-xray/xray-account.cpp --- a/llvm/tools/llvm-xray/xray-account.cpp +++ b/llvm/tools/llvm-xray/xray-account.cpp @@ -78,7 +78,7 @@ "sort", cl::desc("sort output by this field"), cl::value_desc("field"), cl::sub(Account), cl::init(SortField::FUNCID), cl::values(clEnumValN(SortField::FUNCID, "funcid", "function id"), - clEnumValN(SortField::COUNT, "count", "funciton call counts"), + clEnumValN(SortField::COUNT, "count", "function call counts"), clEnumValN(SortField::MIN, "min", "minimum function durations"), clEnumValN(SortField::MED, "med", "median function durations"), clEnumValN(SortField::PCT90, "90p", "90th percentile durations"), diff --git a/llvm/tools/llvm-xray/xray-graph.cpp b/llvm/tools/llvm-xray/xray-graph.cpp --- a/llvm/tools/llvm-xray/xray-graph.cpp +++ b/llvm/tools/llvm-xray/xray-graph.cpp @@ -198,7 +198,7 @@ // example caused by tail call elimination and if the option is enabled then // then tries to recover from this. // -// This funciton will also error if the records are out of order, as the trace +// This function will also error if the records are out of order, as the trace // is expected to be sorted. // // The graph generated has an immaginary root for functions called by no-one at diff --git a/llvm/unittests/ADT/FallibleIteratorTest.cpp b/llvm/unittests/ADT/FallibleIteratorTest.cpp --- a/llvm/unittests/ADT/FallibleIteratorTest.cpp +++ b/llvm/unittests/ADT/FallibleIteratorTest.cpp @@ -68,7 +68,7 @@ friend bool operator==(const FallibleCollectionWalker &LHS, const FallibleCollectionWalker &RHS) { - assert(&LHS.C == &RHS.C && "Comparing iterators across collectionss."); + assert(&LHS.C == &RHS.C && "Comparing iterators across collections."); return LHS.Idx == RHS.Idx; } diff --git a/llvm/unittests/ADT/SequenceTest.cpp b/llvm/unittests/ADT/SequenceTest.cpp --- a/llvm/unittests/ADT/SequenceTest.cpp +++ b/llvm/unittests/ADT/SequenceTest.cpp @@ -1,4 +1,4 @@ -//===- SequenceTest.cpp - Unit tests for a sequence abstraciton -----------===// +//===- SequenceTest.cpp - Unit tests for a sequence abstraction -----------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp --- a/llvm/unittests/Analysis/MemorySSATest.cpp +++ b/llvm/unittests/Analysis/MemorySSATest.cpp @@ -102,7 +102,7 @@ MemoryPhi *MP = MSSA.getMemoryAccess(Merge); EXPECT_NE(MP, nullptr); - // Create the load memory acccess + // Create the load memory access MemoryUse *LoadAccess = cast(Updater.createMemoryAccessInBB( LoadInst, MP, Merge, MemorySSA::Beginning)); MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess(); @@ -238,7 +238,7 @@ B.SetInsertPoint(Merge, Merge->begin()); LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg); - // Create the load memory acccess + // Create the load memory access MemoryUse *LoadAccess = cast(Updater.createMemoryAccessInBB( LoadInst, nullptr, Merge, MemorySSA::Beginning)); Updater.insertUse(LoadAccess); @@ -905,7 +905,7 @@ setupAnalyses(); MemorySSA &MSSA = *Analyses->MSSA; MemorySSAUpdater Updater(&MSSA); - // Create the load memory acccess + // Create the load memory access LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg); MemoryUse *LoadAccess = cast(Updater.createMemoryAccessInBB( LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning)); diff --git a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp --- a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp +++ b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp @@ -74,7 +74,7 @@ } TEST_F(CoreAPIsStandardTest, ResolveUnrequestedSymbol) { - // Test that all symbols in a MaterializationUnit materialize corretly when + // Test that all symbols in a MaterializationUnit materialize correctly when // only a subset of symbols is looked up. // The aim here is to ensure that we're not relying on the query to set up // state needed to materialize the unrequested symbols. @@ -869,7 +869,7 @@ } TEST_F(CoreAPIsStandardTest, FailMaterializerWithUnqueriedSymbols) { - // Make sure that symbols with no queries aganist them still + // Make sure that symbols with no queries against them still // fail correctly. bool MaterializerRun = false; diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp --- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp +++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp @@ -4049,7 +4049,7 @@ EXPECT_NE(LockVar, nullptr); // Find the allocation of a local array that will be used to call the runtime - // reduciton function. + // reduction function. BasicBlock &AllocBlock = Outlined->getEntryBlock(); Value *LocalArray = nullptr; for (Instruction &I : AllocBlock) { diff --git a/llvm/utils/TableGen/CodeGenSchedule.h b/llvm/utils/TableGen/CodeGenSchedule.h --- a/llvm/utils/TableGen/CodeGenSchedule.h +++ b/llvm/utils/TableGen/CodeGenSchedule.h @@ -429,7 +429,7 @@ // List of unique SchedClasses. std::vector SchedClasses; - // Any inferred SchedClass has an index greater than NumInstrSchedClassses. + // Any inferred SchedClass has an index greater than NumInstrSchedClasses. unsigned NumInstrSchedClasses; RecVec ProcResourceDefs; diff --git a/llvm/utils/TableGen/DAGISelMatcher.h b/llvm/utils/TableGen/DAGISelMatcher.h --- a/llvm/utils/TableGen/DAGISelMatcher.h +++ b/llvm/utils/TableGen/DAGISelMatcher.h @@ -46,28 +46,28 @@ public: enum KindTy { // Matcher state manipulation. - Scope, // Push a checking scope. - RecordNode, // Record the current node. - RecordChild, // Record a child of the current node. - RecordMemRef, // Record the memref in the current node. 
- CaptureGlueInput, // If the current node has an input glue, save it. - MoveChild, // Move current node to specified child. - MoveParent, // Move current node to parent. + Scope, // Push a checking scope. + RecordNode, // Record the current node. + RecordChild, // Record a child of the current node. + RecordMemRef, // Record the memref in the current node. + CaptureGlueInput, // If the current node has an input glue, save it. + MoveChild, // Move current node to specified child. + MoveParent, // Move current node to parent. // Predicate checking. - CheckSame, // Fail if not same as prev match. - CheckChildSame, // Fail if child not same as prev match. + CheckSame, // Fail if not same as prev match. + CheckChildSame, // Fail if child not same as prev match. CheckPatternPredicate, - CheckPredicate, // Fail if node predicate fails. - CheckOpcode, // Fail if not opcode. - SwitchOpcode, // Dispatch based on opcode. - CheckType, // Fail if not correct type. - SwitchType, // Dispatch based on type. - CheckChildType, // Fail if child has wrong type. - CheckInteger, // Fail if wrong val. - CheckChildInteger, // Fail if child is wrong val. - CheckCondCode, // Fail if not condcode. - CheckChild2CondCode, // Fail if child is wrong condcode. + CheckPredicate, // Fail if node predicate fails. + CheckOpcode, // Fail if not opcode. + SwitchOpcode, // Dispatch based on opcode. + CheckType, // Fail if not correct type. + SwitchType, // Dispatch based on type. + CheckChildType, // Fail if child has wrong type. + CheckInteger, // Fail if wrong val. + CheckChildInteger, // Fail if child is wrong val. + CheckCondCode, // Fail if not condcode. + CheckChild2CondCode, // Fail if child is wrong condcode. CheckValueType, CheckComplexPat, CheckAndImm, @@ -76,7 +76,7 @@ CheckImmAllZerosV, CheckFoldableChainNode, - // Node creation/emisssion. + // Node creation/emission. EmitInteger, // Create a TargetConstant EmitStringInteger, // Create a TargetConstant from a string. EmitRegister, // Create a register. diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp --- a/llvm/utils/TableGen/GlobalISelEmitter.cpp +++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp @@ -3493,7 +3493,7 @@ bool OperandPredicateMatcher::isHigherPriorityThan( const OperandPredicateMatcher &B) const { // Generally speaking, an instruction is more important than an Int or a - // LiteralInt because it can cover more nodes but theres an exception to + // LiteralInt because it can cover more nodes but there's an exception to // this. G_CONSTANT's are less important than either of those two because they // are more permissive. @@ -5196,7 +5196,7 @@ // naming being the same. One possible solution would be to have // explicit operator for operation capture and reference those. // The plus side is that it would expose opportunities to share - // the capture accross rules. The downside is that it would + // the capture across rules. The downside is that it would // introduce a dependency between predicates (captures must happen // before their first use.) InstructionMatcher &InsnMatcherTemp = M.addInstructionMatcher(Src->getName()); diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py --- a/llvm/utils/UpdateTestChecks/common.py +++ b/llvm/utils/UpdateTestChecks/common.py @@ -599,7 +599,7 @@ else: # This means a previous RUN line produced a body for this function # that is different from the one produced by this current RUN line, - # so the body can't be common accross RUN lines. 
We use None to + # so the body can't be common across RUN lines. We use None to # indicate that. self._func_dict[prefix][func] = None else: diff --git a/llvm/utils/docker/build_docker_image.sh b/llvm/utils/docker/build_docker_image.sh --- a/llvm/utils/docker/build_docker_image.sh +++ b/llvm/utils/docker/build_docker_image.sh @@ -58,7 +58,7 @@ clang. mydocker/clang-debian10:latest - a small image with preinstalled clang. Please note that this example produces a not very useful installation, since it -doesn't override CMake defaults, which produces a Debug and non-boostrapped +doesn't override CMake defaults, which produces a Debug and non-bootstrapped version of clang. To get a 2-stage clang build, you could use this command: diff --git a/llvm/utils/git/github-automation.py b/llvm/utils/git/github-automation.py --- a/llvm/utils/git/github-automation.py +++ b/llvm/utils/git/github-automation.py @@ -401,7 +401,7 @@ return False parser = argparse.ArgumentParser() -parser.add_argument('--token', type=str, required=True, help='GitHub authentiation token') +parser.add_argument('--token', type=str, required=True, help='GitHub authentication token') parser.add_argument('--repo', type=str, default=os.getenv('GITHUB_REPOSITORY', 'llvm/llvm-project'), help='The GitHub repository that we are working with in the form of / (e.g. llvm/llvm-project)') subparsers = parser.add_subparsers(dest='command') @@ -411,7 +411,7 @@ issue_subscriber_parser.add_argument('--issue-number', type=int, required=True) release_workflow_parser = subparsers.add_parser('release-workflow') -release_workflow_parser.add_argument('--llvm-project-dir', type=str, default='.', help='directory containing the llvm-project checout') +release_workflow_parser.add_argument('--llvm-project-dir', type=str, default='.', help='directory containing the llvm-project checkout') release_workflow_parser.add_argument('--issue-number', type=int, required=True, help='The issue number to update') release_workflow_parser.add_argument('--phab-token', type=str, help='Phabricator conduit API token. See https://reviews.llvm.org/settings/user//page/apitokens/') release_workflow_parser.add_argument('--branch-repo-token', type=str, diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn --- a/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn +++ b/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn @@ -19,7 +19,7 @@ "//llvm/lib/Support", ] - # XXX commment + # XXX comment include_dirs = [ "//lldb/source" ] sources = [ "FormatUtil.cpp", diff --git a/llvm/utils/lit/lit/ProgressBar.py b/llvm/utils/lit/lit/ProgressBar.py --- a/llvm/utils/lit/lit/ProgressBar.py +++ b/llvm/utils/lit/lit/ProgressBar.py @@ -38,7 +38,7 @@ >>> term = TerminalController() >>> if term.CLEAR_SCREEN: - ... print('This terminal supports clearning the screen.') + ... print('This terminal supports clearing the screen.') Finally, if the width and height of the terminal are known, then they will be stored in the `COLS` and `LINES` attributes. diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py --- a/llvm/utils/lit/lit/TestRunner.py +++ b/llvm/utils/lit/lit/TestRunner.py @@ -50,7 +50,7 @@ # during expansion. # # COMMAND that follows %dbg(ARG) is also captured. COMMAND can be -# empty as a result of conditinal substitution. +# empty as a result of conditional substitution. 
kPdbgRegex = '%dbg\\(([^)\'"]*)\\)(.*)' class ShellEnvironment(object): diff --git a/llvm/utils/unicode-case-fold.py b/llvm/utils/unicode-case-fold.py --- a/llvm/utils/unicode-case-fold.py +++ b/llvm/utils/unicode-case-fold.py @@ -62,7 +62,7 @@ # b is a list of mappings. All the mappings are assumed to have the same -# shift and the stride between adjecant mappings (if any) is constant. +# shift and the stride between adjacent mappings (if any) is constant. def dump_block(b): global body diff --git a/llvm/utils/update_analyze_test_checks.py b/llvm/utils/update_analyze_test_checks.py --- a/llvm/utils/update_analyze_test_checks.py +++ b/llvm/utils/update_analyze_test_checks.py @@ -26,7 +26,7 @@ A common pattern is to have the script insert complete checking of every instruction. Then, edit it down to only check the relevant instructions. The script is designed to make adding checks to a test case fast, it is *not* -designed to be authoratitive about what constitutes a good test! +designed to be authoritative about what constitutes a good test! """ from __future__ import print_function diff --git a/llvm/utils/update_cc_test_checks.py b/llvm/utils/update_cc_test_checks.py --- a/llvm/utils/update_cc_test_checks.py +++ b/llvm/utils/update_cc_test_checks.py @@ -371,7 +371,7 @@ m = common.CHECK_RE.match(line) if m and m.group(1) in prefix_set: continue # Don't append the existing CHECK lines - # Skip special separator comments added by commmon.add_global_checks. + # Skip special separator comments added by common.add_global_checks. if line.strip() == '//' + common.SEPARATOR: continue if idx in line2func_list: diff --git a/mlir/docs/Bindings/Python.md b/mlir/docs/Bindings/Python.md --- a/mlir/docs/Bindings/Python.md +++ b/mlir/docs/Bindings/Python.md @@ -520,7 +520,7 @@ access and printing. The latter provide access to the defining block or operation and the position of the value within it. By default, the generic `Value` objects are returned from IR traversals. Downcasting is implemented -through concrete subclass constructors, similarly to attribtues and types: +through concrete subclass constructors, similarly to attributes and types: ```python from mlir.ir import BlockArgument, OpResult, Value diff --git a/mlir/docs/TargetLLVMIR.md b/mlir/docs/TargetLLVMIR.md --- a/mlir/docs/TargetLLVMIR.md +++ b/mlir/docs/TargetLLVMIR.md @@ -553,7 +553,7 @@ The "bare pointer" calling convention does not support unranked memrefs as their shape cannot be known at compile time. -### Generic alloction and deallocation functions +### Generic allocation and deallocation functions When converting the Memref dialect, allocations and deallocations are converted into calls to `malloc` (`aligned_alloc` if aligned allocations are requested) diff --git a/mlir/include/mlir/Analysis/DataFlowFramework.h b/mlir/include/mlir/Analysis/DataFlowFramework.h --- a/mlir/include/mlir/Analysis/DataFlowFramework.h +++ b/mlir/include/mlir/Analysis/DataFlowFramework.h @@ -379,7 +379,7 @@ /// dependents are placed on the worklist. /// /// The dependency graph does not need to be static. Each invocation of - /// `visit` can add new dependencies, but these dependecies will not be + /// `visit` can add new dependencies, but these dependencies will not be /// dynamically added to the worklist because the solver doesn't know what /// will provide a value for then. 
virtual LogicalResult visit(ProgramPoint point) = 0; diff --git a/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h b/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h --- a/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h +++ b/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h @@ -74,7 +74,7 @@ /// Remove the specified range of vars. void removeVarRange(VarKind kind, unsigned varStart, unsigned varLimit); - /// Given a MAF `other`, merges local variables such that both funcitons + /// Given a MAF `other`, merges local variables such that both functions /// have union of local vars, without changing the set of points in domain or /// the output. void mergeLocalVars(MultiAffineFunction &other); diff --git a/mlir/include/mlir/Analysis/Presburger/Simplex.h b/mlir/include/mlir/Analysis/Presburger/Simplex.h --- a/mlir/include/mlir/Analysis/Presburger/Simplex.h +++ b/mlir/include/mlir/Analysis/Presburger/Simplex.h @@ -563,7 +563,7 @@ /// negative for all values in the symbol domain, the row needs to be pivoted /// irrespective of the precise value of the symbols. To answer queries like /// "Is this symbolic sample always negative in the symbol domain?", we maintain -/// a `LexSimplex domainSimplex` correponding to the symbol domain. +/// a `LexSimplex domainSimplex` corresponding to the symbol domain. /// /// In other cases, it may be that the symbolic sample is violated at some /// values in the symbol domain and not violated at others. In this case, diff --git a/mlir/include/mlir/Bindings/Python/Attributes.td b/mlir/include/mlir/Bindings/Python/Attributes.td --- a/mlir/include/mlir/Bindings/Python/Attributes.td +++ b/mlir/include/mlir/Bindings/Python/Attributes.td @@ -21,7 +21,7 @@ string pythonType = p; } -// Mappings between supported builtin attribtues and Python types. +// Mappings between supported builtin attributes and Python types. def : PythonAttr<"::mlir::Attribute", "_ods_ir.Attribute">; def : PythonAttr<"::mlir::BoolAttr", "_ods_ir.BoolAttr">; def : PythonAttr<"::mlir::IntegerAttr", "_ods_ir.IntegerAttr">; diff --git a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td --- a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td +++ b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td @@ -51,7 +51,7 @@ The `amdgpu.raw_buffer_load` op is a wrapper around the buffer load intrinsics available on AMD GPUs, including extensions in newer GPUs. - The index into the buffer is computed as for `memref.load` with the additon + The index into the buffer is computed as for `memref.load` with the addition of `indexOffset` and `sgprOffset` (which **may or may not** be considered in bounds checks and includes any offset present on the memref type if it's non-zero). diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td --- a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td +++ b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td @@ -84,7 +84,7 @@ GPU operations implementing this interface take a list of dependencies as `gpu.async.token` arguments and optionally return a `gpu.async.token`. - The op doesn't start executing until all depent ops producing the async + The op doesn't start executing until all dependent ops producing the async dependency tokens have finished executing. 
If the op returns a token, the op merely schedules the execution on the diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -882,7 +882,7 @@ All the threads in the warp must execute the same `mma.sync` operation. For each possible multiplicand PTX data type, there are one or more possible - instruction shapes given as "mMnNkK". The below table describes the posssibilities + instruction shapes given as "mMnNkK". The below table describes the possibilities as well as the types required for the operands. Note that the data type for C (the accumulator) and D (the result) can vary independently when there are multiple possibilities in the "C/D Type" column. @@ -892,7 +892,7 @@ raised. `b1Op` is only relevant when the binary (b1) type is given to - `multiplicandDataType`. It specifies how the multiply-and-acumulate is + `multiplicandDataType`. It specifies how the multiply-and-accumulate is performed and is either `xor_popc` or `and_poc`. The default is `xor_popc`. `intOverflowBehavior` is only relevant when the `multiplicandType` attribute diff --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td --- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td +++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td @@ -12,7 +12,7 @@ // dialects and lower level NVVM dialect. This allow representing PTX specific // operations while using MLIR high level concepts like memref and 2-D vector. // -// Ops semantic are going to be based on vendor specific PTX defintion: +// Ops semantic are going to be based on vendor specific PTX definition: // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html // //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h b/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h --- a/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h +++ b/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h @@ -20,7 +20,7 @@ /// WarpExecuteOnLane0Op. /// The function needs to return an allocation that the lowering can use as /// temporary memory. The allocation needs to match the shape of the type (the - /// type may be VectorType or a scalar) and be availble for the current warp. + /// type may be VectorType or a scalar) and be available for the current warp. /// If there are several warps running in parallel the allocation needs to be /// split so that each warp has its own allocation. using WarpAllocationFn = diff --git a/mlir/include/mlir/IR/BuiltinAttributes.td b/mlir/include/mlir/IR/BuiltinAttributes.td --- a/mlir/include/mlir/IR/BuiltinAttributes.td +++ b/mlir/include/mlir/IR/BuiltinAttributes.td @@ -1104,7 +1104,7 @@ static FlatSymbolRefAttr get(StringAttr value); static FlatSymbolRefAttr get(MLIRContext *ctx, StringRef value); - /// Convenience getter for buliding a SymbolRefAttr based on an operation + /// Convenience getter for building a SymbolRefAttr based on an operation /// that implements the SymbolTrait. 
static FlatSymbolRefAttr get(Operation *symbol); diff --git a/mlir/include/mlir/Transforms/Passes.td b/mlir/include/mlir/Transforms/Passes.td --- a/mlir/include/mlir/Transforms/Passes.td +++ b/mlir/include/mlir/Transforms/Passes.td @@ -48,7 +48,7 @@ This is similar (but opposite) to loop-invariant code motion, which hoists operations out of regions executed more than once. The implementation of - control-flow sink uses a simple and conversative cost model: operations are + control-flow sink uses a simple and conservative cost model: operations are never duplicated and are only moved into singly-executed regions. It is recommended to run canonicalization first to remove unreachable diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp --- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp @@ -320,7 +320,7 @@ global = rewriter.create( loc, globalType, /*isConstant=*/true, LLVM::Linkage::Internal, stringConstName, - rewriter.getStringAttr(formatString), /*allignment=*/0, addressSpace); + rewriter.getStringAttr(formatString), /*alignment=*/0, addressSpace); } // Get a pointer to the format string's first element diff --git a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp @@ -55,7 +55,7 @@ }); } -/// A conversion patttern for detensoring `linalg.generic` ops. +/// A conversion pattern for detensoring `linalg.generic` ops. class DetensorizeGenericOp : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -411,7 +411,7 @@ Block *block = blockArg.getParentBlock(); // For the potentially detensorable block argument, find the - // correpsonding operands in predecessor blocks. + // corresponding operands in predecessor blocks. for (PredecessorIterator pred = block->pred_begin(); pred != block->pred_end(); ++pred) { BranchOpInterface terminator = diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp --- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp +++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp @@ -673,13 +673,13 @@ // • x == 0 -> -INF // • x < 0 -> NAN // • x == +INF -> +INF - Value aproximation = builder.create( + Value approximation = builder.create( zeroMask, cstMinusInf, builder.create( invalidMask, cstNan, builder.create(posInfMask, cstPosInf, x))); - rewriter.replaceOp(op, aproximation); + rewriter.replaceOp(op, approximation); return success(); } diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp --- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp @@ -284,7 +284,7 @@ propagateShapesInRegion(func.getBody()); - // Insert UnrealizedConversionCasts to guarantee ReturnOp agress with + // Insert UnrealizedConversionCasts to guarantee ReturnOp agrees with // the FuncOp type. 
func.walk([&](func::ReturnOp op) { func::FuncOp parent = dyn_cast(op->getParentOp()); diff --git a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp --- a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp +++ b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp @@ -286,7 +286,7 @@ } else { // Update group pending tokens when token will become ready. Because this // will happen asynchronously we must ensure that `group` is alive until - // then, and re-ackquire the lock. + // then, and re-acquire the lock. group->addRef(); token->awaiters.emplace_back([group, onTokenReady]() { diff --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp --- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp +++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp @@ -108,7 +108,7 @@ /// Emits a label for the block. LogicalResult emitLabel(Block &block); - /// Emits the operands and atttributes of the operation. All operands are + /// Emits the operands and attributes of the operation. All operands are /// emitted first and then all attributes in alphabetical order. LogicalResult emitOperandsAndAttributes(Operation &op, ArrayRef exclude = {}); diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp --- a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp @@ -320,7 +320,7 @@ kDeviceCopyinFlag | kHoldFlag, flags, names, index, mapperAllocas))) return failure(); - // TODO copyin readonly currenlty handled as copyin. Update when extension + // TODO copyin readonly currently handled as copyin. Update when extension // available. if (failed(processOperands(builder, moduleTranslation, op, op.copyinReadonlyOperands(), totalNbOperand, @@ -333,7 +333,7 @@ kHostCopyoutFlag | kHoldFlag, flags, names, index, mapperAllocas))) return failure(); - // TODO copyout zero currenlty handled as copyout. Update when extension + // TODO copyout zero currently handled as copyout. Update when extension // available. if (failed(processOperands(builder, moduleTranslation, op, op.copyoutZeroOperands(), totalNbOperand, @@ -346,7 +346,7 @@ kCreateFlag | kHoldFlag, flags, names, index, mapperAllocas))) return failure(); - // TODO create zero currenlty handled as create. Update when extension + // TODO create zero currently handled as create. Update when extension // available. if (failed(processOperands(builder, moduleTranslation, op, op.createZeroOperands(), totalNbOperand, diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp --- a/mlir/lib/Transforms/Utils/RegionUtils.cpp +++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp @@ -546,7 +546,7 @@ // If the lhs or rhs has external uses, the blocks cannot be merged as the // merged version of this operation will not be either the lhs or rhs - // alone (thus semantically incorrect), but some mix dependending on which + // alone (thus semantically incorrect), but some mix depending on which // block preceeded this. 
// TODO allow merging of operations when one block does not dominate the // other diff --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py --- a/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py +++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py @@ -101,7 +101,7 @@ if not self.allow_new_dims: raise ValueError( f"New dimensions not allowed in the current affine expression: " - f"Requested '{dimname}', Availble: {self.all_dims}") + f"Requested '{dimname}', Available: {self.all_dims}") pos = len(self.all_dims) self.all_dims[dimname] = pos self.local_dims[dimname] = pos @@ -114,7 +114,7 @@ if not self.allow_new_symbols: raise ValueError( f"New symbols not allowed in the current affine expression: " - f"Requested '{symname}', Availble: {self.all_symbols}") + f"Requested '{symname}', Available: {self.all_symbols}") pos = len(self.all_symbols) self.all_symbols[symname] = pos self.local_symbols[symname] = pos diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir --- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir +++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir @@ -24,7 +24,7 @@ // ----- -// Same as above but with fp32 acumulation type. +// Same as above but with fp32 accumulation type. // CHECK-LABEL: @m16n8k16_fp16_fp32 func.func @m16n8k16_fp16_fp32(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>, %arg2: vector<2x2xf32>) -> vector<2x2xf32> { diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py +++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py @@ -947,7 +947,7 @@ class _SparseValueInfo(enum.Enum): """Describes how a sparse tensor value is stored. - _UNPACKED: The sparse tensor value is stored as (coordnates, values) in + _UNPACKED: The sparse tensor value is stored as (coordinates, values) in Python. _PACKED: The sparse tensor value is stored as a C pointer to a packed MLIR sparse tensor. @@ -1165,7 +1165,7 @@ def to_array(self) -> np.ndarray: """Returns the numpy array for the Tensor. - This is currenly only implemented for dense Tensor. + This is currently only implemented for dense Tensor. """ if not self.is_dense(): raise ValueError("Conversion from non-dense Tensor " diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp --- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp +++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp @@ -502,7 +502,7 @@ // {3}: documentation (summary + description) // {4}: op attribute list // {5}: builder methods taking standalone attribute parameters -// {6}: additional method defintions +// {6}: additional method definitions // {7}: additional methods for attributes used by indexing maps static const char structuredOpOdsHeaderFormat[] = R"FMT( //===----------------------------------------------------------------------===// diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp --- a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp @@ -402,7 +402,7 @@ using IteratorT = ParameterElement *const *; IteratorT it = params.begin(); - // Find the last required parameter. Commas become optional aftewards. 
+ // Find the last required parameter. Commas become optional afterwards. // Note: IteratorT's copy assignment is deleted. ParameterElement *lastReq = nullptr; for (ParameterElement *param : params) diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp --- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp @@ -172,7 +172,7 @@ /// This class represents a group of order-independent optional clauses. Each /// clause starts with a literal element and has a coressponding parsing -/// element. A parsing element is a continous sequence of format elements. +/// element. A parsing element is a continuous sequence of format elements. /// Each clause can appear 0 or 1 time. class OIListElement : public DirectiveElementBase { public: diff --git a/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp b/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp --- a/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp +++ b/openmp/libomptarget/plugins/amdgpu/src/rtl.cpp @@ -782,7 +782,7 @@ Region, HSA_AMD_MEMORY_POOL_INFO_ACCESSIBLE_BY_ALL, &Access), "Error returned from hsa_amd_memory_pool_get_info when obtaining " "HSA_AMD_MEMORY_POOL_INFO_ACCESSIBLE_BY_ALL\n"); - printf(" Accessable by all: \t\t %s\n", + printf(" Accessible by all: \t\t %s\n", (Access ? "TRUE" : "FALSE")); return HSA_STATUS_SUCCESS; diff --git a/openmp/runtime/src/i18n/en_US.txt b/openmp/runtime/src/i18n/en_US.txt --- a/openmp/runtime/src/i18n/en_US.txt +++ b/openmp/runtime/src/i18n/en_US.txt @@ -18,7 +18,7 @@ # placeholders may be changed, e.g. "File %1$s line %2$d" may be safely edited to # "Line %2$d file %1$s"). # * Adding new message to the end of section. -# Incompatible changes (version must be bumbed by 1): +# Incompatible changes (version must be bumped by 1): # * Introducing new placeholders to existing messages. # * Changing type of placeholders (e.g. "line %1$d" -> "line %1$s"). # * Rearranging order of messages. @@ -477,8 +477,8 @@ AffHWSubsetAttrRepeat "KMP_HW_SUBSET ignored: %1$s: attribute specified more than once." AffHWSubsetAttrInvalid "KMP_HW_SUBSET ignored: %1$s: attribute value %2$s is invalid." AffHWSubsetAllFiltered "KMP_HW_SUBSET ignored: all hardware resources would be filtered, please reduce the filter." -AffHWSubsetAttrsNonHybrid "KMP_HW_SUBSET ignored: Too many attributes specified. This machine is not a hybrid architecutre." -AffHWSubsetIgnoringAttr "KMP_HW_SUBSET: ignoring %1$s attribute. This machine is not a hybrid architecutre." +AffHWSubsetAttrsNonHybrid "KMP_HW_SUBSET ignored: Too many attributes specified. This machine is not a hybrid architecture." +AffHWSubsetIgnoringAttr "KMP_HW_SUBSET: ignoring %1$s attribute. This machine is not a hybrid architecture." # -------------------------------------------------------------------------------------------------- -*- HINTS -*- diff --git a/openmp/runtime/src/kmp_csupport.cpp b/openmp/runtime/src/kmp_csupport.cpp --- a/openmp/runtime/src/kmp_csupport.cpp +++ b/openmp/runtime/src/kmp_csupport.cpp @@ -361,7 +361,7 @@ @param num_threads number of threads per team requested for the teams construct Set the number of teams to be used by the teams construct. The number of initial -teams cretaed will be greater than or equal to the lower bound and less than or +teams created will be greater than or equal to the lower bound and less than or equal to the upper bound. This call is only required if the teams construct has a `num_teams` clause or a `thread_limit` clause (or both). 
diff --git a/openmp/runtime/src/kmp_itt.h b/openmp/runtime/src/kmp_itt.h --- a/openmp/runtime/src/kmp_itt.h +++ b/openmp/runtime/src/kmp_itt.h @@ -327,7 +327,7 @@ #else /* USE_ITT_BUILD */ // Null definitions of the synchronization tracing functions. -// If USE_ITT_BULID is not enabled, USE_ITT_NOTIFY cannot be either. +// If USE_ITT_BUILD is not enabled, USE_ITT_NOTIFY cannot be either. // By defining these we avoid unpleasant ifdef tests in many places. #define KMP_FSYNC_PREPARE(obj) ((void)0) #define KMP_FSYNC_CANCEL(obj) ((void)0) diff --git a/openmp/runtime/test/affinity/libomp_test_topology.h b/openmp/runtime/test/affinity/libomp_test_topology.h --- a/openmp/runtime/test/affinity/libomp_test_topology.h +++ b/openmp/runtime/test/affinity/libomp_test_topology.h @@ -532,7 +532,7 @@ if (current_place != parent->place_nums[j]) { proc_bind_die( proc_bind, T, P, - "Thread %d's place (%d) is not corret. Expected %d\n", i, + "Thread %d's place (%d) is not correct. Expected %d\n", i, partitions[i]->current_place, parent->place_nums[j]); } count = 1; diff --git a/openmp/runtime/tools/lib/tools.pm b/openmp/runtime/tools/lib/tools.pm --- a/openmp/runtime/tools/lib/tools.pm +++ b/openmp/runtime/tools/lib/tools.pm @@ -235,7 +235,7 @@ B -It is very simple wrapper arounf Getopt::Long::GetOptions. It passes all arguments to GetOptions, +It is very simple wrapper around Getopt::Long::GetOptions. It passes all arguments to GetOptions, and add definitions for standard help options: --help, --doc, --verbose, and --quiet. When GetOptions finishes, this subroutine checks exit code, if it is non-zero, standard error message is issued and script terminated. diff --git a/polly/docs/HowToManuallyUseTheIndividualPiecesOfPolly.rst b/polly/docs/HowToManuallyUseTheIndividualPiecesOfPolly.rst --- a/polly/docs/HowToManuallyUseTheIndividualPiecesOfPolly.rst +++ b/polly/docs/HowToManuallyUseTheIndividualPiecesOfPolly.rst @@ -29,7 +29,7 @@ Polly is only able to work with code that matches a canonical form. To translate the LLVM-IR into this form we use a set of - canonicalication passes. They are scheduled by using + canonicalization passes. They are scheduled by using '-polly-canonicalize'. .. code-block:: console diff --git a/polly/include/polly/Support/VirtualInstruction.h b/polly/include/polly/Support/VirtualInstruction.h --- a/polly/include/polly/Support/VirtualInstruction.h +++ b/polly/include/polly/Support/VirtualInstruction.h @@ -94,7 +94,7 @@ /// @param U The llvm::Use the get information for. /// @param LI The LoopInfo analysis. Needed to determine whether the /// value is synthesizable. - /// @param Virtual Whether to ignore existing MemoryAcccess. + /// @param Virtual Whether to ignore existing MemoryAccess. /// /// @return The VirtualUse representing the same use as @p U. static VirtualUse create(Scop *S, const Use &U, LoopInfo *LI, bool Virtual); diff --git a/polly/lib/CodeGen/ManagedMemoryRewrite.cpp b/polly/lib/CodeGen/ManagedMemoryRewrite.cpp --- a/polly/lib/CodeGen/ManagedMemoryRewrite.cpp +++ b/polly/lib/CodeGen/ManagedMemoryRewrite.cpp @@ -163,7 +163,7 @@ // in an expression. // We need this auxiliary function, because if we have a // `Constant` that is a user of `V`, we need to recurse into the -// `Constant`s uses to gather the root instruciton. +// `Constant`s uses to gather the root instruction. 
static void getInstructionUsersOfValue(Value *V, SmallVector &Owners) { if (auto *I = dyn_cast(V)) { diff --git a/polly/lib/Transform/ManualOptimizer.cpp b/polly/lib/Transform/ManualOptimizer.cpp --- a/polly/lib/Transform/ManualOptimizer.cpp +++ b/polly/lib/Transform/ManualOptimizer.cpp @@ -189,7 +189,7 @@ } // If illegal, revert and remove the transformation to not risk re-trying - // indefintely. + // indefinitely. MDNode *NewLoopMD = makePostTransformationMetadata(Ctx, LoopMD, {TransPrefix}, {}); BandAttr *Attr = getBandAttr(OrigBand); diff --git a/polly/lib/Transform/ScheduleTreeTransform.cpp b/polly/lib/Transform/ScheduleTreeTransform.cpp --- a/polly/lib/Transform/ScheduleTreeTransform.cpp +++ b/polly/lib/Transform/ScheduleTreeTransform.cpp @@ -573,7 +573,7 @@ // Do not merge permutable band to avoid loosing the permutability property. // Cannot collapse even two permutable loops, they might be permutable - // individually, but not necassarily accross. + // individually, but not necassarily across. if (unsignedFromIslSize(Band.n_member()) > 1u && Band.permutable()) return getBase().visitBand(Band); diff --git a/polly/test/CodeGen/non-affine-update.ll b/polly/test/CodeGen/non-affine-update.ll --- a/polly/test/CodeGen/non-affine-update.ll +++ b/polly/test/CodeGen/non-affine-update.ll @@ -10,7 +10,7 @@ ; } ; } -; Verify that all changed memory access functions are corectly code generated. +; Verify that all changed memory access functions are correctly code generated. ; At some point this did not work due to memory access identifiers not being ; unique within non-affine scop statements. diff --git a/polly/test/ForwardOpTree/changed-kind.ll b/polly/test/ForwardOpTree/changed-kind.ll --- a/polly/test/ForwardOpTree/changed-kind.ll +++ b/polly/test/ForwardOpTree/changed-kind.ll @@ -4,7 +4,7 @@ ; Thus, in order to save a scalar dependency, forward-optree replaces ; the use of %0 in Stmt_lor_end93 by a load from @c by changing the ; access find from a scalar access to a array accesses. -; llvm.org/PR48034 decribes a crash caused by the mid-processing change. +; llvm.org/PR48034 describes a crash caused by the mid-processing change. target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/polly/test/ScopDetect/non-affine-loop.ll b/polly/test/ScopDetect/non-affine-loop.ll --- a/polly/test/ScopDetect/non-affine-loop.ll +++ b/polly/test/ScopDetect/non-affine-loop.ll @@ -6,7 +6,7 @@ ; ; This function/region does contain a loop, however it is non-affine, hence the access ; A[i] is also. Furthermore, it is the only loop, thus when we over approximate -; non-affine loops __and__ accesses __and__ allow regins without a (affine) loop we will +; non-affine loops __and__ accesses __and__ allow regions without a (affine) loop we will ; detect it, otherwise we won't. ; ; void f(int *A) { diff --git a/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll b/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll --- a/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll +++ b/polly/test/ScopDetectionDiagnostics/ReportVariantBasePtr-01.ll @@ -12,7 +12,7 @@ ; The loads are currently just adds %7 to the list of required invariant loads ; and only -polly-scops checks whether it is actionally possible the be load ; hoisted. The SCoP is still rejected by -polly-detect because it may alias -; with %A and is not considered to be eligble for runtime alias checking. 
+; with %A and is not considered to be eligible for runtime alias checking. ; CHECK: remark: ReportVariantBasePtr01.c:6:8: The following errors keep this region from being a Scop. ; CHECK: remark: ReportVariantBasePtr01.c:7:5: Accesses to the arrays "A", " " may access the same memory. diff --git a/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll b/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll --- a/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll +++ b/polly/test/ScopInfo/aliasing_conditional_alias_groups_2.ll @@ -1,6 +1,6 @@ ; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s ; -; Check that we create two alias groups since the mininmal/maximal accesses +; Check that we create two alias groups since the minimal/maximal accesses ; depend on %b. ; ; CHECK: Alias Groups (2): diff --git a/polly/test/ScopInfo/aliasing_many_parameters_not_all_involved.ll b/polly/test/ScopInfo/aliasing_many_parameters_not_all_involved.ll --- a/polly/test/ScopInfo/aliasing_many_parameters_not_all_involved.ll +++ b/polly/test/ScopInfo/aliasing_many_parameters_not_all_involved.ll @@ -1,7 +1,7 @@ ; RUN: opt %loadPolly -polly-analysis-computeout=0 -polly-print-scops -polly-rtc-max-parameters=8 -disable-output < %s | FileCheck %s --check-prefix=MAX8 ; RUN: opt %loadPolly -polly-analysis-computeout=0 -polly-print-scops -polly-rtc-max-parameters=7 -disable-output < %s | FileCheck %s --check-prefix=MAX7 ; -; Check that we allow this SCoP even though it has 10 parameters involved in posisbly aliasing accesses. +; Check that we allow this SCoP even though it has 10 parameters involved in possibly aliasing accesses. ; However, only 7 are involved in accesses through B, 8 through C and none in accesses through A. ; ; MAX8-LABEL: Function: jd diff --git a/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll b/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll --- a/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll +++ b/polly/test/ScopInfo/extract_constant_factor_introduces_new_parameter.ll @@ -14,7 +14,7 @@ ; ; which introduced a new parameter (-1 + %b) * %a which was not registered ; correctly and consequently caused a crash due to an expression not being -; regiustered as a parameter. +; registered as a parameter. target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" diff --git a/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll b/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll --- a/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll +++ b/polly/test/ScopInfo/invalid_add_rec_after_invariant_load_remapping.ll @@ -1,6 +1,6 @@ ; RUN: opt %loadPolly -polly-print-scops -disable-output < %s ; -; This crased at some point as we place %1 and %4 in the same equivalence class +; This crashed at some point as we place %1 and %4 in the same equivalence class ; for invariant loads and when we remap SCEVs to use %4 instead of %1 AddRec SCEVs ; for the for.body.10 loop caused a crash as their operands were not invariant ; in the loop. While we know they are, ScalarEvolution does not. 
However, we can simply diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll --- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll +++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_3.ll @@ -4,7 +4,7 @@ ; Verify that we canonicalize accesses even tough one of the accesses (even ; the canonical base) has a partial execution context. This is correct as -; the combined execution context still coveres both accesses. +; the combined execution context still covers both accesses. ; CHECK: Invariant Accesses: { ; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0] diff --git a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll --- a/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll +++ b/polly/test/ScopInfo/invariant_load_canonicalize_array_baseptrs_4.ll @@ -3,7 +3,7 @@ ; RUN: | FileCheck %s ; Verify that a delinearized and a not delinearized access are not -; canonizalized. +; canonicalized. ; CHECK: Stmt_body1 ; CHECK-NEXT: Domain := diff --git a/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll b/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll --- a/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll +++ b/polly/test/ScopInfo/multidim_fold_constant_dim_zero.ll @@ -5,7 +5,7 @@ ; This test case at some point crashed Polly due to a 'division by zero' ; when trying to fold the constant dimension into outer dimension. ; We verify that this scop is detected without crash. We also test the -; output to undertand that the scop has been analyzed, but has also been +; output to understand that the scop has been analyzed, but has also been ; invalidated due to the zero size dimension. ; CHECK: Assumed Context: diff --git a/polly/test/ScopInfo/multidim_many_references.ll b/polly/test/ScopInfo/multidim_many_references.ll --- a/polly/test/ScopInfo/multidim_many_references.ll +++ b/polly/test/ScopInfo/multidim_many_references.ll @@ -26,7 +26,7 @@ ; m->rows, m->cols and m->deps happen before the scop. ; This test case verifies that the construction of the assumed context finishes -; successfully. Depending on how constraineds are accummulated in the assumed +; successfully. Depending on how constrained are accumulated in the assumed ; context, this test case can take even for a smaller number of arrays over a ; minute to complete. 
With the unrolling choosen in this test, an inefficient ; formulation of the assumption tracking cause LLVM to crash due to excessive diff --git a/polly/test/ScopInfo/ranged_parameter.ll b/polly/test/ScopInfo/ranged_parameter.ll --- a/polly/test/ScopInfo/ranged_parameter.ll +++ b/polly/test/ScopInfo/ranged_parameter.ll @@ -1,6 +1,6 @@ ; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s ; -; Check that the contstraints on the paramater derived from the +; Check that the constraints on the parameter derived from the ; range metadata (see bottom of the file) are present: ; ; CHECK: Context: diff --git a/polly/test/ScopInfo/redundant_parameter_constraint.ll b/polly/test/ScopInfo/redundant_parameter_constraint.ll --- a/polly/test/ScopInfo/redundant_parameter_constraint.ll +++ b/polly/test/ScopInfo/redundant_parameter_constraint.ll @@ -1,6 +1,6 @@ ; RUN: opt %loadPolly -polly-print-scops -disable-output < %s | FileCheck %s ; -; The constraint that r2 has to be bigger than r1 is implicitly containted in +; The constraint that r2 has to be bigger than r1 is implicitly contained in ; the domain, hence we do not want to see it explicitly. ; ; CHECK-NOT: r2 >= 1 + r1 diff --git a/polly/test/ScopInfo/scalar_to_array.ll b/polly/test/ScopInfo/scalar_to_array.ll --- a/polly/test/ScopInfo/scalar_to_array.ll +++ b/polly/test/ScopInfo/scalar_to_array.ll @@ -6,7 +6,7 @@ @A = common global [1024 x float] zeroinitializer, align 8 -; Terminating loops without side-effects will be optimzied away, hence +; Terminating loops without side-effects will be optimized away, hence ; detecting a scop would be pointless. ; CHECK-NOT: Function: empty ; Function Attrs: nounwind diff --git a/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll b/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll --- a/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll +++ b/polly/test/Simplify/out-of-scop-use-in-region-entry-phi-node.ll @@ -4,7 +4,7 @@ ; %tmp4, it is an "external use". ; ; A common mistake is to assume that %tmp5 is used by %tmp4 in bb3, when -; practially it's the incoming block %bb9 which is the user. +; practically it's the incoming block %bb9 which is the user. ; target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/polly/tools/GPURuntime/GPUJIT.h b/polly/tools/GPURuntime/GPUJIT.h --- a/polly/tools/GPURuntime/GPUJIT.h +++ b/polly/tools/GPURuntime/GPUJIT.h @@ -16,7 +16,7 @@ #include "stddef.h" /* - * The following demostrates how we can use the GPURuntime library to + * The following demonstrates how we can use the GPURuntime library to * execute a GPU kernel. * * char KernelString[] = "\n\ diff --git a/polly/www/documentation/gpgpucodegen.html b/polly/www/documentation/gpgpucodegen.html --- a/polly/www/documentation/gpgpucodegen.html +++ b/polly/www/documentation/gpgpucodegen.html @@ -195,7 +195,7 @@ llvm.codegen Intrinsic Implementation -Codeing Finished, To Be Reviewed +Coding Finished, To Be Reviewed diff --git a/polly/www/index.html b/polly/www/index.html --- a/polly/www/index.html +++ b/polly/www/index.html @@ -110,7 +110,7 @@ mappings are lowered to efficient C/LLVM code.
  • User-defined constraint sets for run-time checks We discuss how arbitrary sets of constraints can be used to automatically create run-time - checks that ensure a set of constrainst actually hold. This feature is + checks that ensure a set of constraints actually hold. This feature is very useful to verify at run-time various assumptions that have been taken program optimization. @@ -142,7 +142,7 @@ The support for ScopLib as an exchange format has been removed as recent versions of clan, candl and pluto all support the OpenScop exchange format. - The support of the external optmizer PoCC has been dropped in favor of the + The support of the external optimizer PoCC has been dropped in favor of the isl optimizer (default) and the still available pluto support. @@ -154,7 +154,7 @@ and David Peixotto's (both Qualcomm) recent commit to isl, isl's latest development version can be built with imath instead of - GMP. With both CLooG and gmp having become optional, the last obilgatory + GMP. With both CLooG and gmp having become optional, the last obligatory dependency to GPL licensed software has been removed. Now Polly only depends on isl (and the included imath), which are both MIT licensed. @@ -377,7 +377,7 @@
 2010
-Dezember
+December
 Basic vectorization support
    diff --git a/polly/www/performance.html b/polly/www/performance.html --- a/polly/www/performance.html +++ b/polly/www/performance.html @@ -24,7 +24,7 @@

    The results shown were created fully automatically without manual interaction. We did not yet spend any time to tune the results. Hence -further improvments may be achieved by tuning the code generated by Polly, the +further improvements may be achieved by tuning the code generated by Polly, the heuristics used by Pluto or by investigating if more code could be optimized. As Pluto was never used at such a low level, its heuristics are probably far from perfect. Another area where we expect larger performance improvements @@ -35,10 +35,10 @@

    The polybench test suite contains computation kernels from linear algebra routines, stencil computations, image processing and data mining. Polly -recognices the majority of them and is able to show good speedup. However, +recognizes the majority of them and is able to show good speedup. However, to show similar speedup on larger examples like the SPEC CPU benchmarks Polly still misses support for integer casts, variable-sized multi-dimensional arrays -and probably several other construts. This support is necessary as such +and probably several other constructs. This support is necessary as such constructs appear in larger programs, but not in our limited test suite.

    Sequential runs

    diff --git a/polly/www/projects.html b/polly/www/projects.html --- a/polly/www/projects.html +++ b/polly/www/projects.html @@ -26,7 +26,7 @@ LLVM Polly keeps here a list of open projects which each of themselves would be a great contribution to Polly. All of these projects are meant to be self contained and should take a newcomer around 3-4 months of work. The projects - we propose are all suiteable as Google Summer of Code projects. In case you + we propose are all suitable as Google Summer of Code projects. In case you are interested in a Google Summer of code project make sure to reach out via the Polly -Pointer Comparisions +Pointer Comparisons Done Johannes @@ -447,7 +447,7 @@ February 2012)

    -First optimizations to show the usefullness of Polly and enhance the user +First optimizations to show the usefulness of Polly and enhance the user experience. We also try to increase the amount of code we can optimize.

    diff --git a/utils/arcanist/clang-format.sh b/utils/arcanist/clang-format.sh --- a/utils/arcanist/clang-format.sh +++ b/utils/arcanist/clang-format.sh @@ -20,7 +20,7 @@ fi if ! git rev-parse --git-dir >/dev/null; then echo "advice" - echo "not in git repostitory; not linting file." + echo "not in git repository; not linting file." echo "====" exit 0 fi diff --git a/utils/bazel/README.md b/utils/bazel/README.md --- a/utils/bazel/README.md +++ b/utils/bazel/README.md @@ -54,7 +54,7 @@ You can instruct Bazel to use a ramdisk for its sandboxing operations via [--sandbox_base](https://docs.bazel.build/versions/main/command-line-reference.html#flag--sandbox_base), -which can help avoid IO bottlenecks for the symlink stragegy used for +which can help avoid IO bottlenecks for the symlink strategy used for sandboxing. This is especially important with many inputs and many cores (see https://github.com/bazelbuild/bazel/issues/11868): diff --git a/utils/bazel/deps_impl/terminfo_system.BUILD b/utils/bazel/deps_impl/terminfo_system.BUILD --- a/utils/bazel/deps_impl/terminfo_system.BUILD +++ b/utils/bazel/deps_impl/terminfo_system.BUILD @@ -4,7 +4,7 @@ # Wrapper library for some system terminfo. Using this only works if the # toolchain already has the relevant library search paths configured. It also -# sets the relevant LLVM `#define`s to enoble using terminfo. +# sets the relevant LLVM `#define`s to enable using terminfo. cc_library( name = "terminfo", defines = ["LLVM_ENABLE_TERMINFO=1"],