diff --git a/llvm/cmake/config.guess b/llvm/cmake/config.guess --- a/llvm/cmake/config.guess +++ b/llvm/cmake/config.guess @@ -4,7 +4,7 @@ # 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011 Free Software Foundation, Inc. -timestamp='2011-08-20' +timestamp='2022-08-04' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -1122,7 +1122,7 @@ # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that + # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; diff --git a/llvm/cmake/modules/AddLLVM.cmake b/llvm/cmake/modules/AddLLVM.cmake --- a/llvm/cmake/modules/AddLLVM.cmake +++ b/llvm/cmake/modules/AddLLVM.cmake @@ -506,7 +506,7 @@ add_dependencies(${obj_name} ${ARG_DEPENDS}) endif() # Treat link libraries like PUBLIC dependencies. LINK_LIBS might - # result in generating header files. Add a dependendency so that + # result in generating header files. Add a dependency so that # the generated header is created before this object library. if(ARG_LINK_LIBS) cmake_parse_arguments(LINK_LIBS_ARG @@ -782,7 +782,7 @@ # - LLVM_LINK_COMPONENTS: a list of component this component depends on # - COMPONENT_HAS_JIT: (only for group component) whether this target group # supports JIT compilation -# Additionnaly, the ADD_TO_COMPONENT option make it possible to add this +# Additionally, the ADD_TO_COMPONENT option makes it possible to add this # component to the LLVM_LINK_COMPONENTS of <name>. function(add_llvm_component_library name) cmake_parse_arguments(ARG diff --git a/llvm/cmake/modules/CoverageReport.cmake b/llvm/cmake/modules/CoverageReport.cmake --- a/llvm/cmake/modules/CoverageReport.cmake +++ b/llvm/cmake/modules/CoverageReport.cmake @@ -54,7 +54,7 @@ # This currently only works for LLVM, but could be expanded to work for all # sub-projects. The current limitation is based on not having a good way to -# automaticall plumb through the targets that we want to run coverage against. +# automatically plumb through the targets that we want to run coverage against. add_custom_target(generate-coverage-report COMMAND ${Python3_EXECUTABLE} ${PREPARE_CODE_COV_ARTIFACT} ${LLVM_PROFDATA} ${LLVM_COV} ${LLVM_PROFILE_DATA_DIR} diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -3990,7 +3990,7 @@ .. note:: - A '``poison``' value (decribed in the next section) should be used instead of + A '``poison``' value (described in the next section) should be used instead of '``undef``' whenever possible. Poison values are stronger than undef, and enable more optimizations. Just the existence of '``undef``' blocks certain optimizations (see the examples below). diff --git a/llvm/docs/MemorySSA.rst b/llvm/docs/MemorySSA.rst --- a/llvm/docs/MemorySSA.rst +++ b/llvm/docs/MemorySSA.rst @@ -470,10 +470,10 @@ results' precision provided by ``MemorySSA``. For example, AliasAnalysis has various caps, or restrictions on looking through phis which can affect what ``MemorySSA`` can infer.
Changes made by different passes may make MemorySSA either "overly -optimized" (it can provide a more acccurate result than if it were recomputed +optimized" (it can provide a more accurate result than if it were recomputed from scratch), or "under optimized" (it could infer more if it were recomputed). This can lead to challenges to reproduced results in isolation with a single pass -when the result relies on the state aquired by ``MemorySSA`` due to being updated by +when the result relies on the state acquired by ``MemorySSA`` due to being updated by multiple subsequent passes. Passes that use and update ``MemorySSA`` should do so through the APIs provided by the ``MemorySSAUpdater``, or through calls on the Walker. diff --git a/llvm/include/llvm-c/Object.h b/llvm/include/llvm-c/Object.h --- a/llvm/include/llvm-c/Object.h +++ b/llvm/include/llvm-c/Object.h @@ -64,7 +64,7 @@ * appropriate implementation selected. The context may be NULL except if * the resulting file is an LLVM IR file. * - * The memory buffer is not consumed by this function. It is the responsibilty + * The memory buffer is not consumed by this function. It is the responsibility * of the caller to free it with \c LLVMDisposeMemoryBuffer. * * If NULL is returned, the \p ErrorMessage parameter is populated with the @@ -80,7 +80,7 @@ /** * Dispose of a binary file. * - * The binary file does not own its backing buffer. It is the responsibilty + * The binary file does not own its backing buffer. It is the responsibility * of the caller to free it with \c LLVMDisposeMemoryBuffer. */ void LLVMDisposeBinary(LLVMBinaryRef BR); diff --git a/llvm/include/llvm-c/Orc.h b/llvm/include/llvm-c/Orc.h --- a/llvm/include/llvm-c/Orc.h +++ b/llvm/include/llvm-c/Orc.h @@ -839,7 +839,7 @@ /** * Notify all not-yet-emitted covered by this MaterializationResponsibility * instance that an error has occurred. - * This will remove all symbols covered by this MaterializationResponsibilty + * This will remove all symbols covered by this MaterializationResponsibility * from the target JITDylib, and send an error to any queries waiting on * these symbols. */ diff --git a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h --- a/llvm/include/llvm/Analysis/BranchProbabilityInfo.h +++ b/llvm/include/llvm/Analysis/BranchProbabilityInfo.h @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This pass is used to evaluate branch probabilties. +// This pass is used to evaluate branch probabilities. // //===----------------------------------------------------------------------===// diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h --- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h +++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h @@ -103,7 +103,7 @@ Unsafe, }; - /// Dependece between memory access instructions. + /// Dependence between memory access instructions. struct Dependence { /// The type of the dependence. enum DepType { @@ -504,7 +504,7 @@ private: /// Groups pointers such that a single memcheck is required /// between two different groups. This will clear the CheckingGroups vector - /// and re-compute it. We will only group dependecies if \p UseDependencies + /// and re-compute it. We will only group dependencies if \p UseDependencies /// is true, otherwise we will create a separate group for each pointer. 
void groupChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies); diff --git a/llvm/include/llvm/Analysis/LoopAnalysisManager.h b/llvm/include/llvm/Analysis/LoopAnalysisManager.h --- a/llvm/include/llvm/Analysis/LoopAnalysisManager.h +++ b/llvm/include/llvm/Analysis/LoopAnalysisManager.h @@ -90,7 +90,7 @@ Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI), MSSAUsed(Arg.MSSAUsed) { // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state. Arg.InnerAM = nullptr; } @@ -99,7 +99,7 @@ LI = RHS.LI; MSSAUsed = RHS.MSSAUsed; // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state. RHS.InnerAM = nullptr; return *this; diff --git a/llvm/include/llvm/Analysis/LoopCacheAnalysis.h b/llvm/include/llvm/Analysis/LoopCacheAnalysis.h --- a/llvm/include/llvm/Analysis/LoopCacheAnalysis.h +++ b/llvm/include/llvm/Analysis/LoopCacheAnalysis.h @@ -166,7 +166,7 @@ /// /// Intuitively a reference group represents memory references that access /// the same cache line. Conditions 1,2 above account for temporal reuse, while -/// contition 3 accounts for spacial reuse. +/// condition 3 accounts for spatial reuse. using ReferenceGroupTy = SmallVector<std::unique_ptr<IndexedReference>, 8>; using ReferenceGroupsTy = SmallVector<ReferenceGroupTy, 8>; diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h --- a/llvm/include/llvm/Analysis/MLModelRunner.h +++ b/llvm/include/llvm/Analysis/MLModelRunner.h @@ -18,7 +18,7 @@ /// MLModelRunner interface: abstraction of a mechanism for evaluating a /// tensorflow "saved model". -/// NOTE: feature indices are expected to be consistent all accross +/// NOTE: feature indices are expected to be consistent all across /// MLModelRunners (pertaining to the same model), and also Loggers (see /// TFUtils.h) class MLModelRunner { diff --git a/llvm/include/llvm/Analysis/RegionInfo.h b/llvm/include/llvm/Analysis/RegionInfo.h --- a/llvm/include/llvm/Analysis/RegionInfo.h +++ b/llvm/include/llvm/Analysis/RegionInfo.h @@ -547,7 +547,7 @@ /// /// After calling this function the BasicBlock RegionNodes will be stored at /// different memory locations. RegionNodes obtained before this function is - /// called are therefore not comparable to RegionNodes abtained afterwords. + /// called are therefore not comparable to RegionNodes obtained afterwards. void clearNodeCache(); /// @name Subregion Iterators diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -470,7 +470,7 @@ /// Relative lookup table entries consist of 32-bit offsets. /// Do not generate relative lookup tables for large code models - /// in 64-bit achitectures where 32-bit offsets might not be enough. + /// in 64-bit architectures where 32-bit offsets might not be enough.
if (TM.getCodeModel() == CodeModel::Medium || TM.getCodeModel() == CodeModel::Large) return false; diff --git a/llvm/include/llvm/CodeGen/DFAPacketizer.h b/llvm/include/llvm/CodeGen/DFAPacketizer.h --- a/llvm/include/llvm/CodeGen/DFAPacketizer.h +++ b/llvm/include/llvm/CodeGen/DFAPacketizer.h @@ -178,7 +178,7 @@ return false; } - // Check if it is legal to prune dependece between SUI and SUJ. + // Check if it is legal to prune dependence between SUI and SUJ. virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) { return false; } diff --git a/llvm/include/llvm/CodeGen/LiveRangeCalc.h b/llvm/include/llvm/CodeGen/LiveRangeCalc.h --- a/llvm/include/llvm/CodeGen/LiveRangeCalc.h +++ b/llvm/include/llvm/CodeGen/LiveRangeCalc.h @@ -86,7 +86,7 @@ /// 2. LiveOut[MBB].second.getNode() == MBB /// The live-out value is defined in MBB. /// 3. forall P in preds(MBB): LiveOut[P] == LiveOut[MBB] - /// The live-out value passses through MBB. All predecessors must carry + /// The live-out value passes through MBB. All predecessors must carry /// the same value. /// /// The domtree node may be null, it can be computed. diff --git a/llvm/include/llvm/CodeGen/MIRPrinter.h b/llvm/include/llvm/CodeGen/MIRPrinter.h --- a/llvm/include/llvm/CodeGen/MIRPrinter.h +++ b/llvm/include/llvm/CodeGen/MIRPrinter.h @@ -34,7 +34,7 @@ /// you the correct list of successor blocks in most cases except for things /// like jump tables where the basic block references can't easily be found. /// The MIRPRinter will skip printing successors if they match the result of -/// this funciton and the parser will use this function to construct a list if +/// this function and the parser will use this function to construct a list if /// it is missing. void guessSuccessors(const MachineBasicBlock &MBB, SmallVectorImpl<MachineBasicBlock*> &Result, diff --git a/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h --- a/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h +++ b/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This pass is used to evaluate branch probabilties on machine basic blocks. +// This pass is used to evaluate branch probabilities on machine basic blocks. // //===----------------------------------------------------------------------===// diff --git a/llvm/include/llvm/CodeGen/RegisterBankInfo.h b/llvm/include/llvm/CodeGen/RegisterBankInfo.h --- a/llvm/include/llvm/CodeGen/RegisterBankInfo.h +++ b/llvm/include/llvm/CodeGen/RegisterBankInfo.h @@ -77,7 +77,7 @@ void print(raw_ostream &OS) const; /// Check that the Mask is compatible with the RegBank. - /// Indeed, if the RegBank cannot accomadate the "active bits" of the mask, + /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask, /// there is no way this mapping is valid. /// /// \note This method does not check anything when assertions are disabled. diff --git a/llvm/include/llvm/CodeGen/RegisterPressure.h b/llvm/include/llvm/CodeGen/RegisterPressure.h --- a/llvm/include/llvm/CodeGen/RegisterPressure.h +++ b/llvm/include/llvm/CodeGen/RegisterPressure.h @@ -367,7 +367,7 @@ /// Track the max pressure within the region traversed so far. RegisterPressure &P; - /// Run in two modes dependending on whether constructed with IntervalPressure + /// Run in two modes depending on whether constructed with IntervalPressure /// or RegisterPressure. If requireIntervals is false, LIS are ignored.
bool RequireIntervals; diff --git a/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h b/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h --- a/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h +++ b/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h @@ -181,9 +181,9 @@ struct FixedSizeInfo { /// The fixed byte size for fixed size forms. uint16_t NumBytes = 0; - /// Number of DW_FORM_address forms in this abbrevation declaration. + /// Number of DW_FORM_address forms in this abbreviation declaration. uint8_t NumAddrs = 0; - /// Number of DW_FORM_ref_addr forms in this abbrevation declaration. + /// Number of DW_FORM_ref_addr forms in this abbreviation declaration. uint8_t NumRefAddrs = 0; /// Number of 4 byte in DWARF32 and 8 byte in DWARF64 forms. uint8_t NumDwarfOffsets = 0; diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/llvm/include/llvm/ExecutionEngine/Orc/Core.h --- a/llvm/include/llvm/ExecutionEngine/Orc/Core.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/Core.h @@ -613,7 +613,7 @@ /// Notify all not-yet-emitted covered by this MaterializationResponsibility /// instance that an error has occurred. - /// This will remove all symbols covered by this MaterializationResponsibilty + /// This will remove all symbols covered by this MaterializationResponsibility /// from the target JITDylib, and send an error to any queries waiting on /// these symbols. void failMaterialization(); diff --git a/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h b/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h --- a/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/DebugUtils.h @@ -60,7 +60,7 @@ raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap::value_type &KV); -/// Render a SymbolDependendeMap. +/// Render a SymbolDependenceMap. raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps); /// Render a MaterializationUnit. diff --git a/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h b/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h --- a/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h @@ -27,7 +27,7 @@ public: /// Create from a ExecutorProcessControl instance alone. This will use /// the EPC's lookupSymbols method to find the registration/deregistration - /// funciton addresses by name. + /// function addresses by name. static Expected<std::unique_ptr<EPCEHFrameRegistrar>> Create(ExecutionSession &ES); diff --git a/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h --- a/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h +++ b/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h @@ -144,7 +144,7 @@ virtual bool needsToReserveAllocationSpace() { return false; } /// Override to return false to tell LLVM no stub space will be needed. - /// This requires some guarantees depending on architecuture, but when + /// This requires some guarantees depending on architecture, but when /// you know what you are doing it saves allocated space.
virtual bool allowStubAllocation() const { return true; } diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h --- a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h @@ -62,7 +62,7 @@ #include "llvm/Frontend/OpenMP/OMPKinds.def" /// IDs for all omp runtime library ident_t flag encodings (see -/// their defintion in openmp/runtime/src/kmp.h). +/// their definition in openmp/runtime/src/kmp.h). enum class IdentFlag { #define OMP_IDENT_FLAG(Enum, Str, Value) Enum = Value, #include "llvm/Frontend/OpenMP/OMPKinds.def" diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPContext.h b/llvm/include/llvm/Frontend/OpenMP/OMPContext.h --- a/llvm/include/llvm/Frontend/OpenMP/OMPContext.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPContext.h @@ -115,7 +115,7 @@ /// Variant match information describes the required traits and how they are /// scored (via the ScoresMap). In addition, the required consturct nesting is -/// decribed as well. +/// described as well. struct VariantMatchInfo { /// Add the trait \p Property to the required trait set. \p RawString is the /// string we parsed and derived \p Property from. If \p Score is not null, it diff --git a/llvm/include/llvm/IR/GlobalValue.h b/llvm/include/llvm/IR/GlobalValue.h --- a/llvm/include/llvm/IR/GlobalValue.h +++ b/llvm/include/llvm/IR/GlobalValue.h @@ -472,7 +472,7 @@ return !mayBeDerefined(); } - /// Return true if this global has an exact defintion. + /// Return true if this global has an exact definition. bool hasExactDefinition() const { // While this computes exactly the same thing as // isStrongDefinitionForLinker, the intended uses are different. This diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td --- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -235,7 +235,7 @@ // MASK = 0x0000 0020: VMEM read instructions may be scheduled across SCHED_BARRIER. // MASK = 0x0000 0040: VMEM write instructions may be scheduled across SCHED_BARRIER. // MASK = 0x0000 0080: ALL DS instructions may be scheduled across SCHED_BARRIER. -// MASK = 0x0000 0100: ALL DS read instructions may be scheduled accoss SCHED_BARRIER. +// MASK = 0x0000 0100: ALL DS read instructions may be scheduled across SCHED_BARRIER. // MASK = 0x0000 0200: ALL DS write instructions may be scheduled across SCHED_BARRIER. def int_amdgcn_sched_barrier : ClangBuiltin<"__builtin_amdgcn_sched_barrier">, Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent, diff --git a/llvm/include/llvm/IR/PassManager.h b/llvm/include/llvm/IR/PassManager.h --- a/llvm/include/llvm/IR/PassManager.h +++ b/llvm/include/llvm/IR/PassManager.h @@ -680,7 +680,7 @@ /// cyclic dependencies between analysis results. /// /// This returns true if the given analysis's result is invalid. Any - /// dependecies on it will become invalid as a result. + /// dependencies on it will become invalid as a result. template <typename PassT> bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) { using ResultModelT = @@ -944,7 +944,7 @@ Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)) { // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state.
Arg.InnerAM = nullptr; } @@ -962,7 +962,7 @@ Result &operator=(Result &&RHS) { InnerAM = RHS.InnerAM; // We have to null out the analysis manager in the moved-from state - // because we are taking ownership of the responsibilty to clear the + // because we are taking ownership of the responsibility to clear the // analysis state. RHS.InnerAM = nullptr; return *this; diff --git a/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h b/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h --- a/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h +++ b/llvm/include/llvm/LTO/legacy/ThinLTOCodeGenerator.h @@ -106,7 +106,7 @@ * \defgroup Cache controlling options * * These entry points control the ThinLTO cache. The cache is intended to - * support incremental build, and thus needs to be persistent accross build. + * support incremental build, and thus needs to be persistent across builds. * The client enabled the cache by supplying a path to an existing directory. * The code generator will use this to store objects files that may be reused * during a subsequent build. diff --git a/llvm/include/llvm/MC/MCStreamer.h b/llvm/include/llvm/MC/MCStreamer.h --- a/llvm/include/llvm/MC/MCStreamer.h +++ b/llvm/include/llvm/MC/MCStreamer.h @@ -889,7 +889,7 @@ /// "foo.c"' assembler directive. virtual void emitFileDirective(StringRef Filename); - /// Emit ".file assembler diretive with additioal info. + /// Emit ".file" assembler directive with additional info. virtual void emitFileDirective(StringRef Filename, StringRef CompilerVerion, StringRef TimeStamp, StringRef Description); diff --git a/llvm/include/llvm/Support/BranchProbability.h b/llvm/include/llvm/Support/BranchProbability.h --- a/llvm/include/llvm/Support/BranchProbability.h +++ b/llvm/include/llvm/Support/BranchProbability.h @@ -56,7 +56,7 @@ static BranchProbability getBranchProbability(uint64_t Numerator, uint64_t Denominator); - // Normalize given probabilties so that the sum of them becomes approximate + // Normalize given probabilities so that the sum of them becomes approximately // one. template <class ProbabilityIter> static void normalizeProbabilities(ProbabilityIter Begin, diff --git a/llvm/include/llvm/Support/RWMutex.h b/llvm/include/llvm/Support/RWMutex.h --- a/llvm/include/llvm/Support/RWMutex.h +++ b/llvm/include/llvm/Support/RWMutex.h @@ -19,7 +19,7 @@ #include <mutex> #include <shared_mutex> -// std::shared_timed_mutex is only availble on macOS 10.12 and later. +// std::shared_timed_mutex is only available on macOS 10.12 and later. #if defined(__APPLE__) && defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) #if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101200 #define LLVM_USE_RW_MUTEX_IMPL diff --git a/llvm/include/llvm/Support/TypeName.h b/llvm/include/llvm/Support/TypeName.h --- a/llvm/include/llvm/Support/TypeName.h +++ b/llvm/include/llvm/Support/TypeName.h @@ -18,7 +18,7 @@ /// /// This routine may fail on some platforms or for particularly unusual types. /// Do not use it for anything other than logging and debugging aids. It isn't -/// portable or dependendable in any real sense. +/// portable or dependable in any real sense. /// /// The returned StringRef will point into a static storage duration string.
/// However, it may not be null terminated and may be some strangely aligned diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h --- a/llvm/include/llvm/Transforms/IPO/Attributor.h +++ b/llvm/include/llvm/Transforms/IPO/Attributor.h @@ -2278,7 +2278,7 @@ /// The interface ensures that the assumed bits are always a subset of the known /// bits. Users can only add known bits and, except through adding known bits, /// they can only remove assumed bits. This should guarantee monotoniticy and -/// thereby the existence of a fixpoint (if used corretly). The fixpoint is +/// thereby the existence of a fixpoint (if used correctly). The fixpoint is /// reached when the assumed and known state/bits are equal. Users can /// force/inidicate a fixpoint. If an optimistic one is indicated, the known /// state will catch up with the assumed one, for a pessimistic fixpoint it is @@ -2536,7 +2536,7 @@ /// Set the assumed value to \p Value but never below the known one. void setAssumed(bool Value) { Assumed &= (Known | Value); } - /// Set the known and asssumed value to \p Value. + /// Set the known and assumed value to \p Value. void setKnown(bool Value) { Known |= Value; Assumed |= Value; @@ -3568,7 +3568,7 @@ /// } /// ``` /// In that case, AccessedBytesMap is `{0:4, 4:4, 8:4, 40:4}`. - /// AccessedBytesMap is std::map so it is iterated in accending order on + /// AccessedBytesMap is std::map so it is iterated in ascending order on /// key(Offset). So KnownBytes will be updated like this: /// /// |Access | KnownBytes diff --git a/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h b/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h --- a/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h +++ b/llvm/include/llvm/Transforms/IPO/FunctionAttrs.h @@ -42,7 +42,7 @@ /// Computes function attributes in post-order over the call graph. /// /// By operating in post-order, this pass computes precise attributes for -/// called functions prior to processsing their callers. This "bottom-up" +/// called functions prior to processing their callers. This "bottom-up" /// approach allows powerful interprocedural inference of function attributes /// like memory access patterns, etc. It can discover functions that do not /// access memory, or only read memory, and give them the readnone/readonly diff --git a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h --- a/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h +++ b/llvm/include/llvm/Transforms/Scalar/LoopPassManager.h @@ -417,7 +417,7 @@ /// The adaptor comes with two modes: the loop mode and the loop-nest mode, and /// the worklist updater lived inside will be in the same mode as the adaptor /// (refer to the documentation of \c LPMUpdater for more detailed explanation). -/// Specifically, in loop mode, all loops in the funciton will be pushed into +/// Specifically, in loop mode, all loops in the function will be pushed into /// the worklist and processed by \p Pass, while only top-level loops are /// processed in loop-nest mode. Please refer to the various specializations of /// \fn createLoopFunctionToLoopPassAdaptor to see when loop mode and loop-nest diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp --- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp +++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp @@ -1246,7 +1246,7 @@ return AliasResult::NoAlias; // Compute ranges of potentially accessed bytes for both accesses. 
If the - // interseciton is empty, there can be no overlap. + // intersection is empty, there can be no overlap. unsigned BW = OffsetRange.getBitWidth(); ConstantRange Range1 = OffsetRange.add( ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue()))); diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp --- a/llvm/lib/Analysis/DependenceAnalysis.cpp +++ b/llvm/lib/Analysis/DependenceAnalysis.cpp @@ -646,7 +646,7 @@ // Returns NoAlias/MayAliass/MustAlias for two memory locations based upon their // underlaying objects. If LocA and LocB are known to not alias (for any reason: -// tbaa, non-overlapping regions etc), then it is known there is no dependecy. +// tbaa, non-overlapping regions etc), then it is known there is no dependency. // Otherwise the underlying objects are checked to see if they point to // different identifiable objects. static AliasResult underlyingObjectsAlias(AAResults *AA, diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -2689,7 +2689,7 @@ default: return nullptr; - // Equality comaprisons are easy to fold. + // Equality comparisons are easy to fold. case CmpInst::ICMP_EQ: case CmpInst::ICMP_NE: break; diff --git a/llvm/lib/Analysis/LazyCallGraph.cpp b/llvm/lib/Analysis/LazyCallGraph.cpp --- a/llvm/lib/Analysis/LazyCallGraph.cpp +++ b/llvm/lib/Analysis/LazyCallGraph.cpp @@ -542,7 +542,7 @@ assert(SourceI > (SCCs.begin() + SourceIdx) && "Must have moved the source to fix the post-order."); assert(*std::prev(SourceI) == &TargetSCC && - "Last SCC to move should have bene the target."); + "Last SCC to move should have been the target."); // Return an empty range at the target SCC indicating there is nothing to // merge. diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp --- a/llvm/lib/Analysis/TFUtils.cpp +++ b/llvm/lib/Analysis/TFUtils.cpp @@ -153,7 +153,7 @@ private: /// The objects necessary for carrying out an evaluation of the SavedModel. - /// They are expensive to set up, and we maintain them accross all the + /// They are expensive to set up, and we maintain them across all the /// evaluations of the model. TF_Session *Session = nullptr; TFGraphPtr Graph; diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp --- a/llvm/lib/Analysis/VectorUtils.cpp +++ b/llvm/lib/Analysis/VectorUtils.cpp @@ -1372,7 +1372,7 @@ // that all the pointers in the group don't wrap. // So we check only group member 0 (which is always guaranteed to exist), // and group member Factor - 1; If the latter doesn't exist we rely on - // peeling (if it is a non-reversed accsess -- see Case 3). + // peeling (if it is a non-reversed access -- see Case 3). if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first"))) continue; if (Group->getMember(Group->getFactor() - 1)) diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -1336,7 +1336,7 @@ OutStreamer->AddComment("number of basic blocks"); OutStreamer->emitULEB128IntValue(MF.size()); const MCSymbol *PrevMBBEndSymbol = FunctionSymbol; - // Emit BB Information for each basic block in the funciton. + // Emit BB Information for each basic block in the function. for (const MachineBasicBlock &MBB : MF) { const MCSymbol *MBBSymbol = MBB.isEntryBlock() ? 
FunctionSymbol : MBB.getSymbol(); diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -2152,7 +2152,7 @@ } /// Return ClassOptions that should be present on both the forward declaration -/// and the defintion of a tag type. +/// and the definition of a tag type. static ClassOptions getCommonClassOptions(const DICompositeType *Ty) { ClassOptions CO = ClassOptions::None; diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp --- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -1151,7 +1151,7 @@ M->debug_compile_units_end()); assert(NumDebugCUs > 0 && "Asm unexpectedly initialized"); assert(MMI->hasDebugInfo() && - "DebugInfoAvailabilty unexpectedly not initialized"); + "DebugInfoAvailability unexpectedly not initialized"); SingleCU = NumDebugCUs == 1; DenseMap<DIGlobalVariable *, SmallVector<DwarfCompileUnit::GlobalExpr, 1>> GVMap; @@ -1723,7 +1723,7 @@ for (auto &R : OpenRanges) Values.push_back(R.second); - // With Basic block sections, it is posssible that the StartLabel and the + // With Basic block sections, it is possible that the StartLabel and the // Instr are not in the same section. This happens when the StartLabel is // the function begin label and the dbg value appears in a basic block // that is not the entry. In this case, the range needs to be split to diff --git a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp --- a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp @@ -398,7 +398,7 @@ computeActionsTable(LandingPads, Actions, FirstActions); // Compute the call-site table and call-site ranges. Normally, there is only - // one call-site-range which covers the whole funciton. With + // one call-site-range which covers the whole function. With // -basic-block-sections, there is one call-site-range per basic block // section. SmallVector<CallSiteEntry, 64> CallSites; diff --git a/llvm/lib/CodeGen/BasicBlockSections.cpp b/llvm/lib/CodeGen/BasicBlockSections.cpp --- a/llvm/lib/CodeGen/BasicBlockSections.cpp +++ b/llvm/lib/CodeGen/BasicBlockSections.cpp @@ -12,7 +12,7 @@ // -fbasic-block-sections= option is used. Further, with profile information // only the subset of basic blocks with profiles are placed in separate sections // and the rest are grouped in a cold section. The exception handling blocks are -// treated specially to ensure they are all in one seciton. +// treated specially to ensure they are all in one section. // // Basic Block Sections // ==================== diff --git a/llvm/lib/CodeGen/BreakFalseDeps.cpp b/llvm/lib/CodeGen/BreakFalseDeps.cpp --- a/llvm/lib/CodeGen/BreakFalseDeps.cpp +++ b/llvm/lib/CodeGen/BreakFalseDeps.cpp @@ -136,7 +136,7 @@ const TargetRegisterClass *OpRC = TII->getRegClass(MI->getDesc(), OpIdx, TRI, *MF); - // If the instruction has a true dependency, we can hide the false depdency + // If the instruction has a true dependency, we can hide the false dependency // behind it. for (MachineOperand &CurrMO : MI->operands()) { if (!CurrMO.isReg() || CurrMO.isDef() || CurrMO.isUndef() || diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -7718,7 +7718,7 @@ // The register pressure on the IndirectBr edges is reduced because %GEPIOp is // no longer alive on them.
// -// We try to unmerge GEPs here in CodGenPrepare, as opposed to limiting merging +// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as // not to disable further simplications and optimizations as a result of GEP // merging. diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -1655,7 +1655,7 @@ SrcReg = MIRBuilder.buildAnyExt(WideTy, SrcReg).getReg(0); } - // Theres no unmerge type to target. Directly extract the bits from the + // There's no unmerge type to target. Directly extract the bits from the // source type unsigned DstSize = DstTy.getSizeInBits(); diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp --- a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp +++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp @@ -353,7 +353,7 @@ return; } - // At this point, we need to repair a defintion of a terminator. + // At this point, we need to repair a definition of a terminator. // Technically we need to fix the def of MI on all outgoing // edges of MI to keep the repairing local. In other words, we diff --git a/llvm/lib/CodeGen/LiveVariables.cpp b/llvm/lib/CodeGen/LiveVariables.cpp --- a/llvm/lib/CodeGen/LiveVariables.cpp +++ b/llvm/lib/CodeGen/LiveVariables.cpp @@ -826,7 +826,7 @@ return false; } -/// addNewBlock - Add a new basic block BB as an empty succcessor to DomBB. All +/// addNewBlock - Add a new basic block BB as an empty successor to DomBB. All /// variables that are live out of DomBB will be marked as passing live through /// BB. void LiveVariables::addNewBlock(MachineBasicBlock *BB, @@ -875,7 +875,7 @@ } } -/// addNewBlock - Add a new basic block BB as an empty succcessor to DomBB. All +/// addNewBlock - Add a new basic block BB as an empty successor to DomBB. All /// variables that are live out of DomBB will be marked as passing live through /// BB. LiveInSets[BB] is *not* updated (because it is not needed during /// PHIElimination). diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -2770,7 +2770,7 @@ // Check live-in list of each MBB. If a register is live into MBB, check // that the register is in regsLiveOut of each predecessor block. Since - // this must come from a definition in the predecesssor or its live-in + // this must come from a definition in the predecessor or its live-in // list, this will catch a live-through case where the predecessor does not // have the register in its live-in list. This currently only checks // registers that have no aliases, are not allocatable and are not diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -1027,7 +1027,7 @@ int DefStageNum = Schedule.getStage(Def); unsigned StageNum = CurStageNum; if (DefStageNum != -1 && (int)InstrStageNum > DefStageNum) { - // Compute the difference in stages between the defintion and the use. + // Compute the difference in stages between the definition and the use. unsigned StageDiff = (InstrStageNum - DefStageNum); // Make an adjustment to get the last definition. 
StageNum -= StageDiff; diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp --- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp +++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp @@ -1094,7 +1094,7 @@ int64_t OffsetBeforeAlignment = Offset; Offset = alignTo(Offset, StackAlign, Skew); - // If we have increased the offset to fulfill the alignment constrants, + // If we have increased the offset to fulfill the alignment constraints, // then the scavenging spill slots may become harder to reach from the // stack pointer, float them so they stay close. if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS && diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -113,7 +113,7 @@ static cl::opt<unsigned> LargeIntervalFreqThreshold( "large-interval-freq-threshold", cl::Hidden, - cl::desc("For a large interval, if it is coalesed with other live " + cl::desc("For a large interval, if it is coalesced with other live " "intervals many times more than the threshold, stop its " "coalescing to control the compile time. "), cl::init(100)); diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp --- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -920,7 +920,7 @@ !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad())) continue; - // Always add dependecy edge to BarrierChain if present. + // Always add dependency edge to BarrierChain if present. if (BarrierChain) BarrierChain->addPredBarrier(SU); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -11130,7 +11130,7 @@ SL->SwitchCases.push_back(CB); } -// Scale CaseProb after peeling a case with the probablity of PeeledCaseProb +// Scale CaseProb after peeling a case with the probability of PeeledCaseProb // from the swith statement. static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb) { @@ -11192,7 +11192,7 @@ Clusters.erase(PeeledCaseIt); for (CaseCluster &CC : Clusters) { LLVM_DEBUG( - dbgs() << "Scale the probablity for one cluster, before scaling: " + dbgs() << "Scale the probability for one cluster, before scaling: " << CC.Prob << "\n"); CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb); LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n"); @@ -11222,7 +11222,7 @@ // if there are many clusters. sortAndRangeify(Clusters); - // The branch probablity of the peeled case. + // The branch probability of the peeled case. BranchProbability PeeledCaseProb = BranchProbability::getZero(); MachineBasicBlock *PeeledSwitchMBB = peelDominantCaseCluster(SI, Clusters, PeeledCaseProb); diff --git a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp --- a/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp +++ b/llvm/lib/CodeGen/StackMapLivenessAnalysis.cpp @@ -46,7 +46,7 @@ /// This pass can be disabled via the -enable-patchpoint-liveness=false flag. /// The pass skips functions that don't have any patchpoint intrinsics. The /// information provided by this pass is optional and not required by the -/// aformentioned intrinsic to function. +/// aforementioned intrinsic to function.
class StackMapLiveness : public MachineFunctionPass { const TargetRegisterInfo *TRI; LivePhysRegs LiveRegs; diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp --- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -2375,7 +2375,7 @@ // read-only section by the compiler. // For BSS kind, zero initialized data must be emitted to the .data section // because external linkage control sections that get mapped to the .bss - // section will be linked as tentative defintions, which is only appropriate + // section will be linked as tentative definitions, which is only appropriate // for SectionKind::Common. if (Kind.isData() || Kind.isReadOnlyWithRel() || Kind.isBSS()) { if (TM.getDataSections()) { diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp --- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -1057,7 +1057,7 @@ } } - // Check if the reschedule will not break depedencies. + // Check if the reschedule will not break dependencies. unsigned NumVisited = 0; for (MachineInstr &OtherMI : make_range(mi, MachineBasicBlock::iterator(KillMI))) { diff --git a/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp b/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp --- a/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFAbbreviationDeclaration.cpp @@ -70,7 +70,7 @@ continue; } Optional ByteSize; - // If this abbrevation still has a fixed byte size, then update the + // If this abbreviation still has a fixed byte size, then update the // FixedAttributeSize as needed. switch (F) { case DW_FORM_addr: @@ -116,7 +116,7 @@ } else { // Attribute and form pairs must either both be non-zero, in which case // they are added to the abbreviation declaration, or both be zero to - // terminate the abbrevation declaration. In this case only one was + // terminate the abbreviation declaration. In this case only one was // zero which is an error. clear(); return false; diff --git a/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp b/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp --- a/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp +++ b/llvm/lib/DebugInfo/GSYM/FunctionInfo.cpp @@ -160,7 +160,7 @@ LR.FuncRange = {FuncAddr, FuncAddr + Data.getU32(&Offset)}; uint32_t NameOffset = Data.getU32(&Offset); // The "lookup" functions doesn't report errors as accurately as the "decode" - // function as it is meant to be fast. For more accurage errors we could call + // function as it is meant to be fast. For more accurate errors we could call // "decode". if (!Data.isValidOffset(Offset)) return createStringError(std::errc::io_error, diff --git a/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp b/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp --- a/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp +++ b/llvm/lib/ExecutionEngine/GDBRegistrationListener.cpp @@ -58,7 +58,7 @@ // FIXME: lli aims to provide both, RuntimeDyld and JITLink, as the dynamic // loaders for it's JIT implementations. And they both offer debugging via the // GDB JIT interface, which builds on the two well-known symbol names below. -// As these symbols must be unique accross the linked executable, we can only +// As these symbols must be unique across the linked executable, we can only // define them in one of the libraries and make the other depend on it. 
// OrcTargetProcess is a minimal stub for embedding a JIT client in remote // executors. For the moment it seems reasonable to have the definition there diff --git a/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp b/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp --- a/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/COFFLinkGraphBuilder.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// Generic COFF LinkGraph buliding code. +// Generic COFF LinkGraph building code. // //===----------------------------------------------------------------------===// #include "COFFLinkGraphBuilder.h" diff --git a/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp b/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp --- a/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// Generic ELF LinkGraph buliding code. +// Generic ELF LinkGraph building code. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp --- a/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp @@ -551,7 +551,7 @@ PassConfiguration Config; const Triple &TT = G->getTargetTriple(); if (Ctx->shouldAddDefaultTargetPasses(TT)) { - // Add eh-frame passses. + // Add eh-frame passes. Config.PrePrunePasses.push_back(DWARFRecordSectionSplitter(".eh_frame")); Config.PrePrunePasses.push_back(EHFrameEdgeFixer( ".eh_frame", 8, aarch64::Pointer32, aarch64::Pointer64, diff --git a/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp --- a/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// Generic MachO LinkGraph buliding code. +// Generic MachO LinkGraph building code. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp --- a/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachO_arm64.cpp @@ -560,7 +560,7 @@ Config.PrePrunePasses.push_back( CompactUnwindSplitter("__LD,__compact_unwind")); - // Add eh-frame passses. + // Add eh-frame passes. // FIXME: Prune eh-frames for which compact-unwind is available once // we support compact-unwind registration with libunwind. Config.PrePrunePasses.push_back( diff --git a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp --- a/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp @@ -475,7 +475,7 @@ PassConfiguration Config; if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) { - // Add eh-frame passses. + // Add eh-frame passes. 
Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_x86_64()); Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_x86_64()); diff --git a/llvm/lib/ExecutionEngine/Orc/Core.cpp b/llvm/lib/ExecutionEngine/Orc/Core.cpp --- a/llvm/lib/ExecutionEngine/Orc/Core.cpp +++ b/llvm/lib/ExecutionEngine/Orc/Core.cpp @@ -346,7 +346,7 @@ } } - // The OnResolveInfo struct will hold the aliases and responsibilty for each + // The OnResolveInfo struct will hold the aliases and responsibility for each // query in the list. struct OnResolveInfo { OnResolveInfo(std::unique_ptr<MaterializationResponsibility> R, @@ -920,7 +920,7 @@ MI.UnemittedDependencies.erase(&OtherJITDylib); } - // If this symbol dependended on any symbols in the error state then move + // If this symbol depended on any symbols in the error state then move // this symbol to the error state too. if (DependsOnSymbolInErrorState) Symbols[Name].setFlags(Symbols[Name].getFlags() | diff --git a/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp b/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp --- a/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp +++ b/llvm/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp @@ -232,7 +232,7 @@ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(&Obj)) { auto &ES = getExecutionSession(); - // For all resolved symbols that are not already in the responsibilty set: + // For all resolved symbols that are not already in the responsibility set: // check whether the symbol is in a comdat section and if so mark it as // weak. for (auto &Sym : COFFObj->symbols()) { diff --git a/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp b/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp --- a/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp +++ b/llvm/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp @@ -249,7 +249,7 @@ auto I = PendingCallWrapperResults.find(0); assert(PendingCallWrapperResults.size() == 1 && I != PendingCallWrapperResults.end() && - "Setup message handler not connectly set up"); + "Setup message handler not correctly set up"); auto SetupMsgHandler = std::move(I->second); PendingCallWrapperResults.erase(I); diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp --- a/llvm/lib/IR/Attributes.cpp +++ b/llvm/lib/IR/Attributes.cpp @@ -1804,7 +1804,7 @@ } //===----------------------------------------------------------------------===// -// AttributeFuncs Function Defintions +// AttributeFuncs Function Definitions //===----------------------------------------------------------------------===// /// Which attributes cannot be applied to a type. diff --git a/llvm/lib/IR/BuiltinGCs.cpp b/llvm/lib/IR/BuiltinGCs.cpp --- a/llvm/lib/IR/BuiltinGCs.cpp +++ b/llvm/lib/IR/BuiltinGCs.cpp @@ -59,7 +59,7 @@ /// A GCStrategy which serves as an example for the usage of a statepoint based /// lowering strategy. This GCStrategy is intended to suitable as a default /// implementation usable with any collector which can consume the standard -/// stackmap format generated by statepoints, uses the default addrespace to +/// stackmap format generated by statepoints, uses the default address space to /// distinguish between gc managed and non-gc managed pointers, and has /// reasonable relocation semantics.
class StatepointGC : public GCStrategy { diff --git a/llvm/lib/IR/PassManager.cpp b/llvm/lib/IR/PassManager.cpp --- a/llvm/lib/IR/PassManager.cpp +++ b/llvm/lib/IR/PassManager.cpp @@ -14,7 +14,7 @@ using namespace llvm; namespace llvm { -// Explicit template instantiations and specialization defininitions for core +// Explicit template instantiations and specialization definitions for core // template typedefs. template class AllAnalysesOn<Module>; template class AllAnalysesOn<Function>; diff --git a/llvm/lib/IR/ReplaceConstant.cpp b/llvm/lib/IR/ReplaceConstant.cpp --- a/llvm/lib/IR/ReplaceConstant.cpp +++ b/llvm/lib/IR/ReplaceConstant.cpp @@ -72,8 +72,8 @@ if (Insts) Insts->insert(NI); } else { - // We had already encountered CE, the correponding instruction already - // exist, use it to replace CE. + // We had already encountered CE, the corresponding instruction + // already exists, use it to replace CE. NI = Visited[CE]; } diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp --- a/llvm/lib/MC/ELFObjectWriter.cpp +++ b/llvm/lib/MC/ELFObjectWriter.cpp @@ -594,7 +594,7 @@ return false; if (Symbol.isVariable() && Symbol.isUndefined()) { - // FIXME: this is here just to diagnose the case of a var = commmon_sym. + // FIXME: this is here just to diagnose the case of a var = common_sym. Layout.getBaseSymbol(Symbol); return false; } diff --git a/llvm/lib/MC/MCParser/AsmLexer.cpp b/llvm/lib/MC/MCParser/AsmLexer.cpp --- a/llvm/lib/MC/MCParser/AsmLexer.cpp +++ b/llvm/lib/MC/MCParser/AsmLexer.cpp @@ -716,7 +716,7 @@ if (CommentString.size() == 1) return CommentString[0] == Ptr[0]; - // Allow # preprocessor commments also be counted as comments for "##" cases + // Allow # preprocessor comments to also be counted as comments for "##" cases if (CommentString[1] == '#') return CommentString[0] == Ptr[0]; diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -3131,7 +3131,7 @@ if (checkForValidSection()) return true; // Only support spaces as separators for .ascii directive for now. See the - // discusssion at https://reviews.llvm.org/D91460 for more details. + // discussion at https://reviews.llvm.org/D91460 for more details. do { if (parseEscapedString(Data)) return true; diff --git a/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/llvm/lib/MC/MCParser/ELFAsmParser.cpp --- a/llvm/lib/MC/MCParser/ELFAsmParser.cpp +++ b/llvm/lib/MC/MCParser/ELFAsmParser.cpp @@ -489,7 +489,7 @@ if (UniqueStr != "unique") return TokError("expected 'unique'"); if (L.isNot(AsmToken::Comma)) - return TokError("expected commma"); + return TokError("expected comma"); Lex(); if (getParser().parseAbsoluteExpression(UniqueID)) return true; diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp --- a/llvm/lib/MC/WasmObjectWriter.cpp +++ b/llvm/lib/MC/WasmObjectWriter.cpp @@ -1548,7 +1548,7 @@ } // Custom sections can also belong to COMDAT groups.
In this case the - // decriptor's "index" field is the section index (in the final object + // descriptor's "index" field is the section index (in the final object // file), but that is not known until after layout, so it must be fixed up // later if (const MCSymbolWasm *C = Section.getGroup()) { diff --git a/llvm/lib/Option/OptTable.cpp b/llvm/lib/Option/OptTable.cpp --- a/llvm/lib/Option/OptTable.cpp +++ b/llvm/lib/Option/OptTable.cpp @@ -511,7 +511,7 @@ function_ref<void(StringRef)> ErrorFn) const { SmallVector NewArgv; // The environment variable specifies initial options which can be overridden - // by commnad line options. + // by command line options. cl::expandResponseFiles(Argc, Argv, EnvVar, Saver, NewArgv); unsigned MAI, MAC; diff --git a/llvm/lib/ProfileData/MemProf.cpp b/llvm/lib/ProfileData/MemProf.cpp --- a/llvm/lib/ProfileData/MemProf.cpp +++ b/llvm/lib/ProfileData/MemProf.cpp @@ -101,7 +101,7 @@ } Result.push_back(static_cast<Meta>(Tag)); } - // Advace the buffer to one past the schema if we succeeded. + // Advance the buffer to one past the schema if we succeeded. Buffer = Ptr; return Result; } diff --git a/llvm/lib/Support/FileUtilities.cpp b/llvm/lib/Support/FileUtilities.cpp --- a/llvm/lib/Support/FileUtilities.cpp +++ b/llvm/lib/Support/FileUtilities.cpp @@ -168,7 +168,7 @@ /// DiffFilesWithTolerance - Compare the two files specified, returning 0 if the /// files match, 1 if they are different, and 2 if there is a file error. This -/// function differs from DiffFiles in that you can specify an absolete and +/// function differs from DiffFiles in that you can specify an absolute and /// relative FP error that is allowed to exist. If you specify a string to fill /// in for the error option, it will set the string to an error message if an /// error occurs, allowing the caller to distinguish between a failed diff and a diff --git a/llvm/lib/Support/Triple.cpp b/llvm/lib/Support/Triple.cpp --- a/llvm/lib/Support/Triple.cpp +++ b/llvm/lib/Support/Triple.cpp @@ -633,7 +633,7 @@ static Triple::ObjectFormatType parseFormat(StringRef EnvironmentName) { return StringSwitch<Triple::ObjectFormatType>(EnvironmentName) - // "xcoff" must come before "coff" because of the order-dependendent + // "xcoff" must come before "coff" because of the order-dependent // pattern matching. .EndsWith("xcoff", Triple::XCOFF) .EndsWith("coff", Triple::COFF) diff --git a/llvm/lib/Support/Unix/Path.inc b/llvm/lib/Support/Unix/Path.inc --- a/llvm/lib/Support/Unix/Path.inc +++ b/llvm/lib/Support/Unix/Path.inc @@ -1138,7 +1138,7 @@ return std::error_code(); RealPath->clear(); #if defined(F_GETPATH) - // When F_GETPATH is availble, it is the quickest way to get + // When F_GETPATH is available, it is the quickest way to get // the real path name. char Buffer[PATH_MAX]; if (::fcntl(ResultFD, F_GETPATH, Buffer) != -1) diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp --- a/llvm/lib/TableGen/Record.cpp +++ b/llvm/lib/TableGen/Record.cpp @@ -1699,7 +1699,7 @@ return const_cast(this); // No doubt that there exists a record, so we should check if types are - // compatiable. + // compatible. return IntInit::get(getRecordKeeper(), CurRec->getType()->typeIsA(CheckType)); } @@ -1712,7 +1712,7 @@ return const_cast(this); } - // Check if types are compatiable. + // Check if types are compatible.
return IntInit::get(getRecordKeeper(), DefInit::get(D)->getType()->typeIsA(CheckType)); } diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp --- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -575,7 +575,7 @@ // Update the CFG first. updateTailPHIs(); - // Save successor probabilties before removing CmpBB and Tail from their + // Save successor probabilities before removing CmpBB and Tail from their // parents. BranchProbability Head2CmpBB = MBPI->getEdgeProbability(Head, CmpBB); BranchProbability CmpBB2Tail = MBPI->getEdgeProbability(CmpBB, Tail); @@ -583,7 +583,7 @@ Head->removeSuccessor(CmpBB); CmpBB->removeSuccessor(Tail); - // If Head and CmpBB had successor probabilties, udpate the probabilities to + // If Head and CmpBB had successor probabilities, update the probabilities to // reflect the ccmp-conversion. if (Head->hasSuccessorProbabilities() && CmpBB->hasSuccessorProbabilities()) { diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10711,7 +10711,7 @@ if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock) return SDValue(); - // The DUPQ operation is indepedent of element type so normalise to i64s. + // The DUPQ operation is independent of element type so normalise to i64s. SDValue Idx128 = Op.getOperand(2); // DUPQ can be used when idx is in range. diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h @@ -311,7 +311,7 @@ /// Returns true if the instruction has a shift by immediate that can be /// executed in one cycle less. static bool isFalkorShiftExtFast(const MachineInstr &MI); - /// Return true if the instructions is a SEH instruciton used for unwinding + /// Return true if the instruction is a SEH instruction used for unwinding /// on Windows. static bool isSEHInstruction(const MachineInstr &MI); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -6697,7 +6697,7 @@ return false; // Find Definition. - assert(MI.getParent() && "Incomplete machine instruciton\n"); + assert(MI.getParent() && "Incomplete machine instruction\n"); MachineBasicBlock *MBB = MI.getParent(); MachineFunction *MF = MBB->getParent(); MachineRegisterInfo *MRI = &MF->getRegInfo(); diff --git a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp --- a/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp +++ b/llvm/lib/Target/AArch64/AArch64LowerHomogeneousPrologEpilog.cpp @@ -372,7 +372,7 @@ InstCount--; break; case FrameHelperType::PrologFrame: { - // Effecitvely no change in InstCount since FpAdjusment is included. + // Effectively no change in InstCount since FpAdjustment is included. 
break; } case FrameHelperType::Epilog: diff --git a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp --- a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp +++ b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp @@ -1,4 +1,4 @@ -//===- AArch64SLSHardening.cpp - Harden Straight Line Missspeculation -----===// +//===- AArch64SLSHardening.cpp - Harden Straight Line Misspeculation -----===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp --- a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp +++ b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp @@ -1,4 +1,4 @@ -//===- AArch64SpeculationHardening.cpp - Harden Against Missspeculation --===// +//===- AArch64SpeculationHardening.cpp - Harden Against Misspeculation --===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp @@ -193,7 +193,7 @@ void addSchedBarrierEdges(SUnit &SU); // Use a SCHED_BARRIER's mask to identify instruction SchedGroups that should - // not be reordered accross the SCHED_BARRIER. This is used for the base + // not be reordered across the SCHED_BARRIER. This is used for the base // SCHED_BARRIER, and not SCHED_GROUP_BARRIER. The difference is that // SCHED_BARRIER will always block all instructions that can be classified // into a particular SchedClass, whereas SCHED_GROUP_BARRIER has a fixed size diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp --- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp +++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp @@ -2773,7 +2773,7 @@ if (AFI.shouldSignReturnAddress()) { // The order of register must match the order we push them, because the // PEI assigns frame indices in that order. When compiling for return - // address sign and authenication, we use split push, therefore the orders + // address sign and authentication, we use split push, therefore the orders // we want are: // LR, R7, R6, R5, R4, , R11, R10, R9, R8, D15-D8 CSI.insert(find_if(CSI, diff --git a/llvm/lib/Target/ARM/ARMSLSHardening.cpp b/llvm/lib/Target/ARM/ARMSLSHardening.cpp --- a/llvm/lib/Target/ARM/ARMSLSHardening.cpp +++ b/llvm/lib/Target/ARM/ARMSLSHardening.cpp @@ -1,4 +1,4 @@ -//===- ARMSLSHardening.cpp - Harden Straight Line Missspeculation ---------===// +//===- ARMSLSHardening.cpp - Harden Straight Line Misspeculation ---------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -1806,7 +1806,7 @@ bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat; auto LT = TLI->getTypeLegalizationCost(DL, ICA.getArgTypes()[0]); EVT MTy = TLI->getValueType(DL, ICA.getReturnType()); - // Check for the legal types, with the corect subtarget features. + // Check for the legal types, with the correct subtarget features. 
if ((ST->hasVFP2Base() && LT.second == MVT::f32 && MTy == MVT::i32) || (ST->hasFP64() && LT.second == MVT::f64 && MTy == MVT::i32) || (ST->hasFullFP16() && LT.second == MVT::f16 && MTy == MVT::i32)) diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -1157,9 +1157,9 @@ // Directive not convertable to compact unwind, bail out. DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() - << "CFI directive not compatiable with comact " - "unwind encoding, opcode=" << Inst.getOperation() - << "\n"); + << "CFI directive not compatible with compact " + "unwind encoding, opcode=" + << Inst.getOperation() << "\n"); return CU::UNWIND_ARM_MODE_DWARF; break; } diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp @@ -481,7 +481,7 @@ unsigned EncodedValue, const MCSubtargetInfo &STI) const { if (isThumb2(STI)) { - // NEON Thumb2 data-processsing encodings are very simple: bit 24 is moved + // NEON Thumb2 data-processing encodings are very simple: bit 24 is moved // to bit 12 of the high half-word (i.e. bit 28), and bits 27-24 are // set to 1111. unsigned Bit24 = EncodedValue & 0x01000000; diff --git a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp --- a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp +++ b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp @@ -857,7 +857,7 @@ if (PrevVCMP) { if (MachineOperand *MO = Instr.findRegisterUseOperand( PrevVCMP->getOperand(0).getReg(), /*isKill*/ true)) { - // If we come accross the instr that kills PrevVCMP's result, record it + // If we come across the instr that kills PrevVCMP's result, record it // so we can remove the kill flag later if we need to. PrevVCMPResultKiller = MO; } diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp --- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp +++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp @@ -320,7 +320,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. TBB = I->getOperand(0).getMBB(); continue; } diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp --- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp +++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp @@ -51,7 +51,7 @@ // !llvm.preserve.access.index // // Bitfield member access needs special attention. User cannot take the -// address of a bitfield acceess. To facilitate kernel verifier +// address of a bitfield access. To facilitate kernel verifier // for easy bitfield code optimization, a new clang intrinsic is introduced: // uint32_t __builtin_preserve_field_info(member_access, info_kind) // In IR, a chain with two (or more) intrinsic calls will be generated: diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.cpp b/llvm/lib/Target/BPF/BPFInstrInfo.cpp --- a/llvm/lib/Target/BPF/BPFInstrInfo.cpp +++ b/llvm/lib/Target/BPF/BPFInstrInfo.cpp @@ -204,7 +204,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. 
TBB = I->getOperand(0).getMBB(); continue; } diff --git a/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp b/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp --- a/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp +++ b/llvm/lib/Target/BPF/BPFMISimplifyPatchable.cpp @@ -11,7 +11,7 @@ // ldd r2, r1, 0 // add r3, struct_base_reg, r2 // -// Here @global should represent an AMA (abstruct member access). +// Here @global should represent an AMA (abstract member access). // Such an access is subject to bpf load time patching. After this pass, the // code becomes // ld_imm64 r1, @global diff --git a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp --- a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp +++ b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp @@ -330,7 +330,7 @@ MachineFunction &MF = DAG.getMachineFunction(); - // Used with vargs to acumulate store chains. + // Used with vargs to accumulate store chains. std::vector OutChains; // Assign locations to all of the incoming arguments. diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp --- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -13,7 +13,7 @@ // Having said that, we should re-attempt to pull this earlier at some point // in future. -// The basic approach looks for sequence of predicated jump, compare instruciton +// The basic approach looks for sequence of predicated jump, compare instruction // that genereates the predicate and, the feeder to the predicate. Once it finds // all, it collapses compare and jump instruction into a new value jump // intstructions. diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h --- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h +++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.h @@ -97,7 +97,7 @@ // together. bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override; - // isLegalToPruneDependencies - Is it legal to prune dependece between SUI + // isLegalToPruneDependencies - Is it legal to prune dependence between SUI // and SUJ. bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) override; diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp --- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -988,7 +988,7 @@ // We attempt to detect it by analyzing existing dependencies in the packet. // Analyze relationships between all existing members of the packet. - // Look for Anti dependecy on the same predicate reg as used in the + // Look for Anti dependency on the same predicate reg as used in the // candidate. for (auto I : CurrentPacketMIs) { // Scheduling Unit for current insn in the packet. diff --git a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp --- a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp +++ b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp @@ -326,7 +326,7 @@ return false; // This check is in place specifically for intrinsics. isSameOperationAs will // return two for any two hexagon intrinsics because they are essentially the - // same instruciton (CallInst). We need to scratch the surface to see if they + // same instruction (CallInst). We need to scratch the surface to see if they // are calls to the same function. 
if (CallInst *C1 = dyn_cast(I1)) { if (CallInst *C2 = dyn_cast(I2)) { diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.td b/llvm/lib/Target/M68k/M68kInstrInfo.td --- a/llvm/lib/Target/M68k/M68kInstrInfo.td +++ b/llvm/lib/Target/M68k/M68kInstrInfo.td @@ -464,7 +464,7 @@ // Complex Patterns //===----------------------------------------------------------------------===// -// NOTE Though this CP is not strictly necessarily it will simplify instruciton +// NOTE Though this CP is not strictly necessary it will simplify instruction // definitions def MxCP_ARI : ComplexPattern; diff --git a/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp b/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp --- a/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp +++ b/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp @@ -7,7 +7,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// This file contains defintions for M68k code emitter. +/// This file contains definitions for M68k code emitter. /// //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp --- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp +++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp @@ -209,7 +209,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. TBB = I->getOperand(0).getMBB(); continue; } diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -6476,7 +6476,7 @@ } MCBinaryExpr::Opcode Opcode; // GAS and LLVM treat comparison operators different. GAS will generate -1 - // or 0, while LLVM will generate 0 or 1. Since a comparsion operator is + // or 0, while LLVM will generate 0 or 1. Since a comparison operator is // highly unlikely to be found in a memory offset expression, we don't // handle them. switch (Tok.getKind()) { diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -3631,7 +3631,7 @@ MipsFI->setVarArgsFrameIndex(0); - // Used with vargs to acumulate store chains. + // Used with vargs to accumulate store chains. std::vector OutChains; // Assign locations to all of the incoming arguments. 
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -1008,7 +1008,7 @@ CurDAG->getDataLayout().getPointerSizeInBits(MemSD->getAddressSpace()); // Volatile Setting - // - .volatile is only availalble for .global and .shared + // - .volatile is only available for .global and .shared bool IsVolatile = MemSD->isVolatile(); if (CodeAddrSpace != NVPTX::PTXLdStInstCode::GLOBAL && CodeAddrSpace != NVPTX::PTXLdStInstCode::SHARED && diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCELFStreamer.cpp @@ -183,7 +183,7 @@ emitLabel(LabelSym, Inst.getLoc()); } -// This funciton checks if the parameter Inst is part of the setup for a link +// This function checks if the parameter Inst is part of the setup for a link // time GOT PC Relative optimization. For example in this situation: // // >)>> @@ -195,7 +195,7 @@ // and has the flag MCSymbolRefExpr::VK_PPC_PCREL_OPT. After that we just look // at the opcode and in the case of PLDpc we will return true. For the load // (or store) this function will return false indicating it has found the second -// instruciton in the pair. +// instruction in the pair. Optional llvm::isPartOfGOTToPCRelPair(const MCInst &Inst, const MCSubtargetInfo &STI) { // Need at least two operands. diff --git a/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp b/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp --- a/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp +++ b/llvm/lib/Target/PowerPC/PPCGenScalarMASSEntries.cpp @@ -76,7 +76,7 @@ if (!isa(CI)) return false; - // FIXME: no-errno and trapping-math need to be set for MASS converstion + // FIXME: no-errno and trapping-math need to be set for MASS conversion // but they don't have IR representation. return CI.hasNoNaNs() && CI.hasNoInfs() && CI.hasNoSignedZeros(); } diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp --- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -296,8 +296,8 @@ /// SelectAddrIdx - Given the specified address, check to see if it can be /// represented as an indexed [r+r] operation. /// This is for xform instructions whose associated displacement form is D. - /// The last parameter \p 0 means associated D form has no requirment for 16 - /// bit signed displacement. + /// The last parameter \p 0 means associated D form has no requirement for + /// 16 bit signed displacement. /// Returns false if it can be represented by [r+imm], which are preferred. bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) { return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, None); @@ -333,8 +333,8 @@ /// SelectAddrImm - Returns true if the address N can be represented by /// a base register plus a signed 16-bit displacement [r+imm]. - /// The last parameter \p 0 means D form has no requirment for 16 bit signed - /// displacement. + /// The last parameter \p 0 means D form has no requirement for 16 bit + /// signed displacement. 
bool SelectAddrImm(SDValue N, SDValue &Disp, SDValue &Base) { return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, None); diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -320,7 +320,7 @@ PPC32_GOT, /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and - /// local dynamic TLS and position indendepent code on PPC32. + /// local dynamic TLS and position independent code on PPC32. PPC32_PICGOT, /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -5230,7 +5230,7 @@ unsigned RetOpc = 0; // This is a call through a function pointer. if (CFlags.IsIndirect) { - // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer accross + // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across // indirect calls. The save of the caller's TOC pointer to the stack will be // inserted into the DAG as part of call lowering. The restore of the TOC // pointer is modeled by using a pseudo instruction for the call opcode that @@ -5244,7 +5244,7 @@ assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI."); RetOpc = PPCISD::CALL_NOTOC; } else if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI()) - // The ABIs that maintain a TOC pointer accross calls need to have a nop + // The ABIs that maintain a TOC pointer across calls need to have a nop // immediately following the call instruction if the caller and callee may // have different TOC bases. At link time if the linker determines the calls // may not share a TOC base, the call is redirected to a trampoline inserted @@ -9256,7 +9256,7 @@ // Exclude somes case where LD_SPLAT is worse than scalar_to_vector: // Below cases should also happen for "lfiwzx/lfiwax + LE target + index // 1" and "lxvrhx + BE target + index 7" and "lxvrbx + BE target + index - // 15", but funciton IsValidSplatLoad() now will only return true when + // 15", but function IsValidSplatLoad() now will only return true when // the data at index 0 is not nullptr. So we will not get into trouble for // these cases. // diff --git a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp --- a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp +++ b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp @@ -640,7 +640,7 @@ // ... // %add = getelementptr %phinode, %inc // -// First returned instruciton is %phinode (or a type cast to %phinode), caller +// First returned instruction is %phinode (or a type cast to %phinode), caller // needs this value to rewrite other load/stores in the same chain. // Second returned instruction is %add, caller needs this value to rewrite other // load/stores in the same chain. 
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp --- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp +++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -1379,7 +1379,7 @@ bool IsPartiallyRedundant = (MBBtoMoveCmp != nullptr); // We cannot optimize an unsupported compare opcode or - // a mix of 32-bit and 64-bit comaprisons + // a mix of 32-bit and 64-bit comparisons if (!isSupportedCmpOp(CMPI1->getOpcode()) || !isSupportedCmpOp(CMPI2->getOpcode()) || is64bitCmpOp(CMPI1->getOpcode()) != is64bitCmpOp(CMPI2->getOpcode())) diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td b/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td --- a/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td +++ b/llvm/lib/Target/PowerPC/PPCRegisterInfoMMA.td @@ -50,7 +50,7 @@ // The AllocationPriority is in the range [0, 63]. Assigned the ACC registers // the highest possible priority in this range to force the register allocator // to assign these registers first. This is done because the ACC registers - // must represent 4 advacent vector registers. For example ACC1 must be + // must represent 4 adjacent vector registers. For example ACC1 must be // VS4 - VS7. The value here must be at least 32 as we want to allocate // these registers even before we allocate global ranges. let AllocationPriority = 63; diff --git a/llvm/lib/Target/PowerPC/README_ALTIVEC.txt b/llvm/lib/Target/PowerPC/README_ALTIVEC.txt --- a/llvm/lib/Target/PowerPC/README_ALTIVEC.txt +++ b/llvm/lib/Target/PowerPC/README_ALTIVEC.txt @@ -103,7 +103,7 @@ //===----------------------------------------------------------------------===// -The code generated for this is truly aweful: +The code generated for this is truly awful: vector float test(float a, float b) { return (vector float){ 0.0, a, 0.0, 0.0}; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -11153,7 +11153,7 @@ EVT PtrVT = getPointerTy(DAG.getDataLayout()); MVT XLenVT = Subtarget.getXLenVT(); unsigned XLenInBytes = Subtarget.getXLen() / 8; - // Used with vargs to acumulate store chains. + // Used with vargs to accumulate store chains. std::vector OutChains; // Assign locations to all of the incoming arguments. diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td --- a/llvm/lib/Target/Sparc/SparcInstrInfo.td +++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td @@ -1587,7 +1587,7 @@ Requires<[HasHardQuad]>; } -// Floating point conditional move instrucitons with %fcc0-%fcc3. +// Floating point conditional move instructions with %fcc0-%fcc3. let Predicates = [HasV9] in { let Constraints = "$f = $rd", intcc = 0 in { def V9MOVFCCrr diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp --- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -410,7 +410,7 @@ continue; } - // TBB is used to indicate the unconditinal destination. + // TBB is used to indicate the unconditional destination. TBB = Branch.getMBBTarget(); continue; } diff --git a/llvm/lib/Target/WebAssembly/README.txt b/llvm/lib/Target/WebAssembly/README.txt --- a/llvm/lib/Target/WebAssembly/README.txt +++ b/llvm/lib/Target/WebAssembly/README.txt @@ -17,7 +17,7 @@ applications that can run in browsers and other environments. 
wasi-sdk provides a more minimal C/C++ SDK based on clang, llvm and a libc based -on musl, for producing WebAssemmbly applictions that use the WASI ABI. +on musl, for producing WebAssembly applications that use the WASI ABI. Rust provides WebAssembly support integrated into Cargo. There are two main options: diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp @@ -190,8 +190,8 @@ } // Add BBs to exceptions' block set. This is a preparation to take out - // remaining incorect BBs from exceptions, because we need to iterate over BBs - // for each exception. + // remaining incorrect BBs from exceptions, because we need to iterate over + // BBs for each exception. for (auto *DomNode : post_order(&MDT)) { MachineBasicBlock *MBB = DomNode->getBlock(); WebAssemblyException *WE = getExceptionFor(MBB); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixBrTableDefaults.cpp @@ -41,7 +41,7 @@ char WebAssemblyFixBrTableDefaults::ID = 0; -// Target indepedent selection dag assumes that it is ok to use PointerTy +// Target independent selection dag assumes that it is ok to use PointerTy // as the index for a "switch", whereas Wasm so far only has a 32-bit br_table. // See e.g. SelectionDAGBuilder::visitJumpTableHeader // We have a 64-bit br_table in the tablegen defs as a result, which does get diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -250,7 +250,7 @@ for (auto T : {MVT::v2i64, MVT::v2f64}) setOperationAction(Op, T, Expand); - // But saturating fp_to_int converstions are + // But saturating fp_to_int conversions are custom lowered for (auto Op : {ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}) setOperationAction(Op, MVT::v4i32, Custom); } diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -47,7 +47,7 @@ case WebAssembly::CONST_F32: case WebAssembly::CONST_F64: // isReallyTriviallyReMaterializableGeneric misses these because of the - // ARGUMENTS implicit def, so we manualy override it here. + // ARGUMENTS implicit def, so we manually override it here. return true; default: return false; } diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp --- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -531,7 +531,7 @@ // Macro fusion actually happens and there is no other fragment inserted // after the previous instruction. // - // Do nothing here since we already inserted a BoudaryAlign fragment when + // Do nothing here since we already inserted a BoundaryAlign fragment when // we met the first instruction in the fused pair and we'll tie them // together in emitInstructionEnd. 
// diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -5238,7 +5238,7 @@ SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); - // Multiply is commmutative. + // Multiply is commutative. if (!foldedLoad) { foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); if (foldedLoad) diff --git a/llvm/lib/Target/X86/X86InstrFMA3Info.h b/llvm/lib/Target/X86/X86InstrFMA3Info.h --- a/llvm/lib/Target/X86/X86InstrFMA3Info.h +++ b/llvm/lib/Target/X86/X86InstrFMA3Info.h @@ -43,7 +43,7 @@ /// This bit must be set in the 'Attributes' field of FMA group if such /// group of FMA opcodes consists of AVX512 opcodes accepting a k-mask and /// passing the elements from the 1st operand to the result of the operation - /// when the correpondings bits in the k-mask are unset. + /// when the corresponding bits in the k-mask are unset. KMergeMasked = 0x2, /// This bit must be set in the 'Attributes' field of FMA group if such diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -1010,7 +1010,7 @@ // Scalar SSE intrinsic fragments to match several different types of loads. // Used by scalar SSE intrinsic instructions which have 128 bit types, but // only load a single element. -// FIXME: We should add more canolicalizing in DAGCombine. Particulary removing +// FIXME: We should add more canonicalizing in DAGCombine. Particularly removing // the simple_load case. def sse_load_f16 : PatFrags<(ops node:$ptr), [(v8f16 (simple_load node:$ptr)), diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -1392,7 +1392,7 @@ if (MinSize == 2 && Subtarget->is32Bit() && Subtarget->isTargetWindowsMSVC() && (Subtarget->getCPU().empty() || Subtarget->getCPU() == "pentium3")) { - // For compatibilty reasons, when targetting MSVC, is is important to + // For compatibility reasons, when targeting MSVC, it is important to // generate a 'legacy' NOP in the form of a 8B FF MOV EDI, EDI. Some tools // rely specifically on this pattern to be able to patch a function. // This is only for 32-bit targets, when using /arch:IA32 or /arch:SSE. diff --git a/llvm/lib/Target/X86/X86PreAMXConfig.cpp b/llvm/lib/Target/X86/X86PreAMXConfig.cpp --- a/llvm/lib/Target/X86/X86PreAMXConfig.cpp +++ b/llvm/lib/Target/X86/X86PreAMXConfig.cpp @@ -233,7 +233,7 @@ continue; IntrinsicInst *TileDef = dyn_cast(Op); assert((TileDef && isTileLoad(TileDef)) && - "All KeyAMX's tile definiation should comes from TileLoad!"); + "All KeyAMX's tile definition should come from TileLoad!"); Shapes.push_back(TileDef->getOperand(0)); Shapes.push_back(TileDef->getOperand(1)); } diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp --- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp +++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp @@ -580,7 +580,7 @@ if (TermIt == MBB.end() || !TermIt->isBranch()) continue; - // Add all the non-EH-pad succossors to the blocks we want to harden. We + // Add all the non-EH-pad successors to the blocks we want to harden. 
We // skip EH pads because there isn't really a condition of interest on // entering. for (MachineBasicBlock *SuccMBB : MBB.successors()) diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -4133,7 +4133,7 @@ CurrVecTy->getNumElements() / CurrNumEltPerOp); assert(DL.getTypeSizeInBits(CoalescedVecTy) == DL.getTypeSizeInBits(CurrVecTy) && - "coalesciing elements doesn't change vector width."); + "coalescing elements doesn't change vector width."); while (NumEltRemaining > 0) { assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumtion?"); diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp --- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp @@ -1230,7 +1230,7 @@ Shape.AsyncLowering.getContextAlignment()); if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) { report_fatal_error( - "The alignment requirment of frame variables cannot be higher than " + "The alignment requirement of frame variables cannot be higher than " "the alignment of the async function context"); } break; @@ -2687,7 +2687,7 @@ } // Later code makes structural assumptions about single predecessors phis e.g - // that they are not live accross a suspend point. + // that they are not live across a suspend point. cleanupSinglePredPHIs(F); // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp --- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp @@ -1288,7 +1288,7 @@ if (!BR || !BR->isConditional() || CondCmp != BR->getCondition()) return false; - // And the comparsion looks like : %cond = icmp eq i8 %V, constant. + // And the comparison looks like : %cond = icmp eq i8 %V, constant. // So we try to resolve constant for the first operand only since the // second operand should be literal constant by design. ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0)); diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp --- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp +++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp @@ -707,7 +707,7 @@ } // namespace PointerInfo } // namespace AA -/// Helper for AA::PointerInfo::Acccess DenseMap/Set usage. +/// Helper for AA::PointerInfo::Access DenseMap/Set usage. 
template <> struct DenseMapInfo : DenseMapInfo { using Access = AAPointerInfo::Access; @@ -722,7 +722,7 @@ struct DenseMapInfo : DenseMapInfo> {}; -/// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign +/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything /// but the instruction struct AccessAsInstructionInfo : DenseMapInfo { using Base = DenseMapInfo; diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -119,7 +119,7 @@ static cl::opt AlwaysInlineDeviceFunctions( "openmp-opt-inline-device", - cl::desc("Inline all applicible functions on the device."), cl::Hidden, + cl::desc("Inline all applicable functions on the device."), cl::Hidden, cl::init(false)); static cl::opt diff --git a/llvm/lib/Transforms/IPO/PartialInlining.cpp b/llvm/lib/Transforms/IPO/PartialInlining.cpp --- a/llvm/lib/Transforms/IPO/PartialInlining.cpp +++ b/llvm/lib/Transforms/IPO/PartialInlining.cpp @@ -750,9 +750,9 @@ // outlined region is predicted to be likely, its probability needs // to be made higher (more biased) to not under-estimate the cost of // function outlining. On the other hand, if the outlined region - // is predicted to be less likely, the predicted probablity is usually + // is predicted to be less likely, the predicted probability is usually // higher than the actual. For instance, the actual probability of the - // less likely target is only 5%, but the guessed probablity can be + // less likely target is only 5%, but the guessed probability can be // 40%. In the latter case, there is no need for further adjustement. // FIXME: add an option for this. if (OutlineRegionRelFreq < BranchProbability(45, 100)) diff --git a/llvm/lib/Transforms/IPO/SampleProfile.cpp b/llvm/lib/Transforms/IPO/SampleProfile.cpp --- a/llvm/lib/Transforms/IPO/SampleProfile.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfile.cpp @@ -1333,7 +1333,7 @@ // this callsite that makes this inlining potentially illegal. Need to // set ComputeFullInlineCost, otherwise getInlineCost may return early // when cost exceeds threshold without checking all IRs in the callee. - // The acutal cost does not matter because we only checks isNever() to + // The actual cost does not matter because we only check isNever() to // see if it is legal to inline the callsite. InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params, GetTTI(*Callee), GetAC, GetTLI); diff --git a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp --- a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp +++ b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp @@ -75,7 +75,7 @@ if (F->isDeclaration()) return false; // Skip function that will not be emitted into object file. The prevailing - // defintion will be verified instead. + // definition will be verified instead. if (F->hasAvailableExternallyLinkage()) return false; // Do a name matching. diff --git a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp --- a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp +++ b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp @@ -187,7 +187,7 @@ Function::Create(EmptyFT, GlobalValue::ExternalLinkage, F.getAddressSpace(), "", &M); NewF->copyAttributesFrom(&F); - // Only copy function attribtues. + // Only copy function attributes. 
NewF->setAttributes(AttributeList::get(M.getContext(), AttributeList::FunctionIndex, F.getAttributes().getFnAttrs())); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -89,12 +89,12 @@ } const APFloat &getFpVal() const { - assert(IsFp && BufHasFpVal && "Incorret state"); + assert(IsFp && BufHasFpVal && "Incorrect state"); return *getFpValPtr(); } APFloat &getFpVal() { - assert(IsFp && BufHasFpVal && "Incorret state"); + assert(IsFp && BufHasFpVal && "Incorrect state"); return *getFpValPtr(); } diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp --- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -1729,7 +1729,7 @@ // doesn't either a) tell us the loop exits on the first iteration (unless // *all* exits are predicateable) or b) tell us *which* exit might be taken. // This transformation looks a lot like a restricted form of dead loop - // elimination, but restricted to read-only loops and without neccesssarily + // elimination, but restricted to read-only loops and without necessarily // needing to kill the loop entirely. if (!LoopPredication) return false; diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp --- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp +++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp @@ -428,7 +428,7 @@ // order. Thus, if FC0 comes *before* FC1 in a FusionCandidateSet, then FC0 // dominates FC1 and FC1 post-dominates FC0. // std::set was chosen because we want a sorted data structure with stable -// iterators. A subsequent patch to loop fusion will enable fusing non-ajdacent +// iterators. A subsequent patch to loop fusion will enable fusing non-adjacent // loops by moving intervening code around. When this intervening code contains // loops, those loops will be moved also. The corresponding FusionCandidates // will also need to be moved accordingly. As this is done, having stable @@ -739,7 +739,7 @@ if (TC0 == 0 || TC1 == 0) { LLVM_DEBUG(dbgs() << "Loop(s) do not have a single exit point or do not " "have a constant number of iterations. Peeling " - "is not benefical\n"); + "is not beneficial\n"); return {false, None}; } diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp --- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp +++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp @@ -182,7 +182,7 @@ // forward and backward dependences qualify. Disqualify loads that have // other unknown dependences. 
- SmallPtrSet LoadsWithUnknownDepedence; + SmallPtrSet LoadsWithUnknownDependence; for (const auto &Dep : *Deps) { Instruction *Source = Dep.getSource(LAI); @@ -190,9 +190,9 @@ if (Dep.Type == MemoryDepChecker::Dependence::Unknown) { if (isa(Source)) - LoadsWithUnknownDepedence.insert(Source); + LoadsWithUnknownDependence.insert(Source); if (isa(Destination)) - LoadsWithUnknownDepedence.insert(Destination); + LoadsWithUnknownDependence.insert(Destination); continue; } @@ -219,9 +219,9 @@ Candidates.emplace_front(Load, Store); } - if (!LoadsWithUnknownDepedence.empty()) + if (!LoadsWithUnknownDependence.empty()) Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) { - return LoadsWithUnknownDepedence.count(C.Load); + return LoadsWithUnknownDependence.count(C.Load); }); return Candidates; @@ -245,7 +245,7 @@ /// However, we know that this is not the case here, i.e. we can rely on LAA /// to provide us with loop-independent dependences for the cases we're /// interested. Consider the case for example where a loop-independent - /// dependece S1->S2 invalidates the forwarding S3->S2. + /// dependence S1->S2 invalidates the forwarding S3->S2. /// /// A[i] = ... (S1) /// ... = A[i] (S2) diff --git a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp --- a/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp +++ b/llvm/lib/Transforms/Scalar/LoopSimplifyCFG.cpp @@ -474,7 +474,7 @@ NumLoopBlocksDeleted += DeadLoopBlocks.size(); } - /// Constant-fold terminators of blocks acculumated in FoldCandidates into the + /// Constant-fold terminators of blocks accumulated in FoldCandidates into the /// unconditional branches. void foldTerminators() { for (BasicBlock *BB : FoldCandidates) { diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp --- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -4856,7 +4856,7 @@ /// Now count registers number mathematical expectation for each formula: /// Note that for each use we exclude probability if not selecting for the use. /// For example for Use1 probability for reg(a) would be just 1 * 1 (excluding -/// probabilty 1/3 of not selecting for Use1). +/// probability 1/3 of not selecting for Use1). /// Use1: /// reg(a) + reg({0,+,1}) 1 + 1/3 -- to be deleted /// reg(a) + reg({-1,+,1}) + 1 1 + 4/9 -- to be deleted diff --git a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp --- a/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp +++ b/llvm/lib/Transforms/Scalar/PlaceSafepoints.cpp @@ -487,7 +487,7 @@ // In various bits below, we rely on the fact that uses are reachable from // defs. When there are basic blocks unreachable from the entry, dominance - // and reachablity queries return non-sensical results. Thus, we preprocess + // and reachability queries return non-sensical results. Thus, we preprocess // the function to ensure these properties hold. Modified |= removeUnreachableBlocks(F); diff --git a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp --- a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp +++ b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp @@ -29,7 +29,7 @@ namespace llvm { cl::opt ShouldPreserveAllAttributes( "assume-preserve-all", cl::init(false), cl::Hidden, - cl::desc("enable preservation of all attrbitues. even those that are " + cl::desc("enable preservation of all attributes. 
even those that are " "unlikely to be usefull")); cl::opt EnableKnowledgeRetention( diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -1845,8 +1845,9 @@ }); LLVM_DEBUG(if (verifyFunction(*oldFunction)) report_fatal_error("verification of oldFunction failed!")); - LLVM_DEBUG(if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC)) - report_fatal_error("Stale Asumption cache for old Function!")); + LLVM_DEBUG( + if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC)) + report_fatal_error("Stale Assumption cache for old Function!")); return newFunction; } diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp --- a/llvm/lib/Transforms/Utils/LoopPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp @@ -311,7 +311,7 @@ continue; // If not, give up. // However, for equality comparisons, that isn't always sufficient to - // eliminate the comparsion in loop body, we may need to peel one more + // eliminate the comparison in loop body, we may need to peel one more // iteration. See if that makes !Pred become unknown again. if (ICmpInst::isEquality(Pred) && !SE.isKnownPredicate(ICmpInst::getInversePredicate(Pred), NextIterVal, @@ -331,7 +331,7 @@ /// This "heuristic" exactly matches implicit behavior which used to exist /// inside getLoopEstimatedTripCount. It was added here to keep an -/// improvement inside that API from causing peeling to become more agressive. +/// improvement inside that API from causing peeling to become more aggressive. /// This should probably be removed. static bool violatesLegacyMultiExitLoopCheck(Loop *L) { BasicBlock *Latch = L->getLoopLatch(); diff --git a/llvm/lib/Transforms/Utils/SymbolRewriter.cpp b/llvm/lib/Transforms/Utils/SymbolRewriter.cpp --- a/llvm/lib/Transforms/Utils/SymbolRewriter.cpp +++ b/llvm/lib/Transforms/Utils/SymbolRewriter.cpp @@ -40,7 +40,7 @@ // // Note that source and exactly one of [Target, Transform] must be provided // -// New rewrite descriptors can be created. Addding a new rewrite descriptor +// New rewrite descriptors can be created. Adding a new rewrite descriptor // involves: // // a) extended the rewrite descriptor kind enumeration diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -704,7 +704,7 @@ /// The legality analysis. LoopVectorizationLegality *Legal; - /// The profitablity analysis. + /// The profitability analysis. LoopVectorizationCostModel *Cost; // Record whether runtime checks are added. diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -9398,7 +9398,7 @@ }; // Any instruction which isn't safe to speculate at the begining of the - // block is control dependend on any early exit or non-willreturn call + // block is control dependent on any early exit or non-willreturn call // which proceeds it. 
if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) { for (Instruction *I = BundleMember->Inst->getNextNode(); @@ -9426,7 +9426,7 @@ if (match(I, m_Intrinsic()) || match(I, m_Intrinsic())) // Any allocas past here must be control dependent on I, and I - // must be memory dependend on BundleMember->Inst. + // must be memory dependent on BundleMember->Inst. break; if (!isa(I)) diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h --- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h +++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h @@ -34,7 +34,7 @@ /// The legality analysis. LoopVectorizationLegality *Legal; - /// The profitablity analysis. + /// The profitability analysis. LoopVectorizationCostModel &CM; PredicatedScalarEvolution &PSE; diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -1499,7 +1499,7 @@ /// a vector operand into a scalar value, and adding the result to a chain. /// The Operands are {ChainOp, VecOp, [Condition]}. class VPReductionRecipe : public VPRecipeBase, public VPValue { - /// The recurrence decriptor for the reduction in question. + /// The recurrence descriptor for the reduction in question. const RecurrenceDescriptor *RdxDesc; /// Pointer to the TTI, needed to create the target reduction const TargetTransformInfo *TTI; diff --git a/llvm/test/Analysis/BasicAA/modref.ll b/llvm/test/Analysis/BasicAA/modref.ll --- a/llvm/test/Analysis/BasicAA/modref.ll +++ b/llvm/test/Analysis/BasicAA/modref.ll @@ -194,7 +194,7 @@ ret i32 %Diff } -;; In this case load can *not* be removed. Function clobers only %P2 but it may +;; In this case load can *not* be removed. Function clobbers only %P2 but it may ;; alias with %P. define i32 @test10(i32* %P, i32* %P2) { ; CHECK-LABEL: @test10( diff --git a/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll b/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll --- a/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll +++ b/llvm/test/Analysis/DependenceAnalysis/SimpleSIVNoValidityCheckFixedSize.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -disable-output -passes="print" 2>&1 | FileCheck %s -; Note: exact results can be achived even if +; Note: exact results can be achieved even if ; "-da-disable-delinearization-checks" is not used ; CHECK-LABEL: t1 diff --git a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll --- a/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/forward-loop-independent.ll @@ -1,10 +1,10 @@ ; RUN: opt -passes='print-access-info' -disable-output < %s 2>&1 | FileCheck %s -; Check that loop-indepedent forward dependences are discovered properly. +; Check that loop-independent forward dependences are discovered properly. ; ; FIXME: This does not actually always work which is pretty confusing. Right -; now there is hack in LAA that tries to figure out loop-indepedent forward -; dependeces *outside* of the MemoryDepChecker logic (i.e. proper dependence +; now there is hack in LAA that tries to figure out loop-independent forward +; dependences *outside* of the MemoryDepChecker logic (i.e. proper dependence ; analysis). 
; ; Therefore if there is only loop-independent dependences for an array diff --git a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll --- a/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/stride-access-dependence.ll @@ -108,7 +108,7 @@ br i1 %cmp, label %for.body, label %for.cond.cleanup } -; Following cases are unsafe depdences and are not vectorizable. +; Following cases are unsafe dependences and are not vectorizable. ; void unsafe_Read_Write(int *A) { ; for (unsigned i = 0; i < 1024; i+=3) diff --git a/llvm/test/BugPoint/metadata.ll b/llvm/test/BugPoint/metadata.ll --- a/llvm/test/BugPoint/metadata.ll +++ b/llvm/test/BugPoint/metadata.ll @@ -8,7 +8,7 @@ ; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%pluginext %s -output-prefix %t-notype -bugpoint-crashcalls -silence-passes -disable-namedmd-remove -disable-strip-debuginfo > /dev/null ; RUN: llvm-dis %t-notype-reduced-simplified.bc -o - | FileCheck %s --check-prefix=NOTYPE ; -; Bugpoint can drop the metadata on the call, as it does not contrinute to the crash. +; Bugpoint can drop the metadata on the call, as it does not contribute to the crash. ; CHECK: call void @foo() ; NODEBUG: call void @foo() diff --git a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll --- a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll +++ b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll @@ -272,7 +272,7 @@ } ;; All non-aggregate fields must have the same type, all through the -;; overall aggreagate. This is false here because of the i32. +;; overall aggregate. This is false here because of the i32. %T_NESTED_STRUCT_DIFFM = type { [ 1 x { { double, double } } ], [ 1 x { { double, i32 } } ] diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll --- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -1,6 +1,6 @@ ; REQUIRES: asserts ; -; The Cortext-A57 machine model will avoid scheduling load instructions in +; The Cortex-A57 machine model will avoid scheduling load instructions in ; succession because loads on the A57 have a latency of 4 cycles and they all ; issue to the same pipeline. Instead, it will move other instructions between ; the loads to avoid unnecessary stalls. The generic machine model schedules 4 diff --git a/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir b/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir --- a/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir +++ b/llvm/test/CodeGen/AArch64/ccmp-successor-probs.mir @@ -1,6 +1,6 @@ # RUN: llc -o - %s -mtriple=aarch64--linux-gnu -mcpu=falkor -run-pass=aarch64-ccmp | FileCheck %s --- -# This test checks that successor probabilties are properly updated after a +# This test checks that successor probabilities are properly updated after a # ccmp-conversion. # # CHECK-LABEL: name: aarch64-ccmp-successor-probs diff --git a/llvm/test/CodeGen/AArch64/ifcvt-select.ll b/llvm/test/CodeGen/AArch64/ifcvt-select.ll --- a/llvm/test/CodeGen/AArch64/ifcvt-select.ll +++ b/llvm/test/CodeGen/AArch64/ifcvt-select.ll @@ -1,5 +1,5 @@ ; RUN: llc -mtriple=arm64-apple-ios -mcpu=cyclone < %s | FileCheck %s -; Do not generate redundant select in early if-converstion pass. +; Do not generate redundant select in early if-conversion pass. 
define i32 @foo(i32 %a, i32 %b) { entry: diff --git a/llvm/test/CodeGen/AArch64/swift-async-win.ll b/llvm/test/CodeGen/AArch64/swift-async-win.ll --- a/llvm/test/CodeGen/AArch64/swift-async-win.ll +++ b/llvm/test/CodeGen/AArch64/swift-async-win.ll @@ -37,7 +37,7 @@ } ; NOTE: we do not see the canonical windows frame setup due to the `nounwind` -; attribtue on the function. +; attribute on the function. ; CHECK: sub sp, sp, #64 ; CHECK: stp x30, x29, [sp, #16] diff --git a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll --- a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll +++ b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll @@ -37,7 +37,7 @@ ; GISEL: STRXui %{{.*}}, %fixed-stack.0 ; Make sure that there is an dependence edge between fi#-2 and fi#-4. -; Without this edge the scheduler would be free to move the store accross the load. +; Without this edge the scheduler would be free to move the store across the load. ; COMMON: {{^SU(.*)}}: [[VRB]]:gpr64 = LDRXui %fixed-stack.2 ; COMMON-NOT: {{^SU(.*)}}: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll @@ -18,7 +18,7 @@ ret void } -; Check flags are preserved for an arbitrarry target intrinsic +; Check flags are preserved for an arbitrary target intrinsic ; CHECK-LABEL: name: rcp_nsz ; CHECK: = nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %{{[0-9]+}}(s32) define amdgpu_kernel void @rcp_nsz(float %arg0) { diff --git a/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir --- a/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir +++ b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir @@ -1,7 +1,7 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=si-lower-control-flow -amdgpu-remove-redundant-endcf %s -o - | FileCheck -check-prefix=GCN %s -# Make sure dbg_value doesn't change codeegn when collapsing end_cf +# Make sure dbg_value doesn't change codegen when collapsing end_cf --- name: simple_nested_if_dbg_value tracksRegLiveness: true diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll --- a/llvm/test/CodeGen/AMDGPU/idot4u.ll +++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll @@ -886,7 +886,7 @@ ret void } -; TODO: Support commutation accross the adds. +; TODO: Support commutation across the adds. define amdgpu_kernel void @udot4_CommutationAccrossMADs(<4 x i8> addrspace(1)* %src1, ; GFX7-LABEL: udot4_CommutationAccrossMADs: ; GFX7: ; %bb.0: ; %entry diff --git a/llvm/test/CodeGen/AMDGPU/idot8u.ll b/llvm/test/CodeGen/AMDGPU/idot8u.ll --- a/llvm/test/CodeGen/AMDGPU/idot8u.ll +++ b/llvm/test/CodeGen/AMDGPU/idot8u.ll @@ -2811,7 +2811,7 @@ ret void } -; TODO: Once the adictional "and+add" are removed, the pattern will be recognized. +; TODO: Once the additional "and+add" are removed, the pattern will be recognized. 
define amdgpu_kernel void @udot8_acc4_vecMul(<8 x i4> addrspace(1)* %src1, ; GFX7-LABEL: udot8_acc4_vecMul: ; GFX7: ; %bb.0: ; %entry diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll --- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll +++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll @@ -422,7 +422,7 @@ ret void } -; offset puts outside of superegister bounaries, so clamp to 1st element. +; offset puts outside of superregister boundaries, so clamp to 1st element. ; GCN-LABEL: {{^}}extract_largest_inbounds_offset: ; GCN-DAG: buffer_load_dwordx4 v[[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]] ; GCN-DAG: s_load_dword [[IDX0:s[0-9]+]] diff --git a/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir b/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir --- a/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir +++ b/llvm/test/CodeGen/AMDGPU/insert-delay-alu.mir @@ -195,7 +195,7 @@ $vgpr0 = V_ADD_U32_e32 $vgpr0, $vgpr1, implicit $exec ... -# There's no need to encode the VALU depdendency because it will complete before +# There's no need to encode the VALU dependency because it will complete before # the TRANS. --- name: trans32_dep_1_only diff --git a/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll b/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll --- a/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll +++ b/llvm/test/CodeGen/AMDGPU/remaining-virtual-register-operands.ll @@ -8,7 +8,7 @@ ; The machine verifier complains about usage of register ; which is marked as killed in previous instruction. ; This happens due to when register allocator is out of registers -; it takes the first avialable register. +; it takes the first available register. ; CHECK: error: ran out of registers during register allocation ; CHECK: Bad machine code: Using an undefined physical register diff --git a/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll b/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll --- a/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll +++ b/llvm/test/CodeGen/AMDGPU/schedule-global-loads.ll @@ -19,7 +19,7 @@ ret void } -; Test for a crach in SIInstrInfo::areLoadsFromSameBasePtr() when checking +; Test for a crash in SIInstrInfo::areLoadsFromSameBasePtr() when checking ; an MUBUF load which does not have a vaddr operand. ; FUNC-LABEL: {{^}}same_base_ptr_crash: ; SI: buffer_load_dword diff --git a/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll b/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll --- a/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll +++ b/llvm/test/CodeGen/AMDGPU/scratch-buffer.ll @@ -4,7 +4,7 @@ ; When a frame index offset is more than 12-bits, make sure we don't store ; it in mubuf's offset field. -; Also, make sure we use the same register for storing the scratch buffer addresss +; Also, make sure we use the same register for storing the scratch buffer address ; for both stores. This register is allocated by the register scavenger, so we ; should be able to reuse the same regiser for each scratch buffer access.
diff --git a/llvm/test/CodeGen/AMDGPU/structurize1.ll b/llvm/test/CodeGen/AMDGPU/structurize1.ll --- a/llvm/test/CodeGen/AMDGPU/structurize1.ll +++ b/llvm/test/CodeGen/AMDGPU/structurize1.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=r600 -mcpu=redwood -r600-if-convert=0 < %s | FileCheck %s -; This tests for abug where the AMDILCFGStructurizer was crashing on loops +; This tests for a bug where the AMDILCFGStructurizer was crashing on loops ; like this: ; ; for (i = 0; i < x; i++) { diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll --- a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll +++ b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll @@ -216,7 +216,7 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp, float %bias, float %zcompare, float %s, float %t, float %clamp) { ; The vgpr tuple8 operand in image_gather4_c_b_cl instruction needs to be preserved -; across the call and should get allcoated to 8 CSRs. +; across the call and should get allocated to 8 CSRs. ; Only the lower 5 sub-registers of the tuple are preserved. ; The upper 3 sub-registers are unused. ; GFX9-LABEL: call_preserved_vgpr_tuple8: diff --git a/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll b/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll --- a/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll +++ b/llvm/test/CodeGen/AMDGPU/vtx-schedule.ll @@ -2,7 +2,7 @@ ; This test is for a scheduler bug where VTX_READ instructions that used ; the result of another VTX_READ instruction were being grouped in the -; same fetch clasue. +; same fetch clause. ; CHECK: {{^}}test: ; CHECK: Fetch clause diff --git a/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir b/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir --- a/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir +++ b/llvm/test/CodeGen/AVR/pseudo/FRMIDX.mir @@ -1,7 +1,7 @@ # RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s # TODO: Write this test. -# This instruction isn't expanded by the pseudo expansion passs, but +# This instruction isn't expanded by the pseudo expansion pass, but # rather AVRRegisterInfo::eliminateFrameIndex. --- | diff --git a/llvm/test/CodeGen/Hexagon/fp16.ll b/llvm/test/CodeGen/Hexagon/fp16.ll --- a/llvm/test/CodeGen/Hexagon/fp16.ll +++ b/llvm/test/CodeGen/Hexagon/fp16.ll @@ -6,7 +6,7 @@ ; (__extendhfsf2). ; The extension from fp16 to fp64 is implicitly handled by __extendhfsf2 and convert_sf2d. ; (fp16->fp32->fp64). -; Generate correcct libcall names for conversion from fp32/fp64 to fp16 +; Generate correct libcall names for conversion from fp32/fp64 to fp16 ; (__truncsfhf2 and __truncdfhf2) ; Verify that we generate loads and stores of halfword. diff --git a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll --- a/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll +++ b/llvm/test/CodeGen/Hexagon/swp-epilog-phi7.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=hexagon -O2 -enable-pipeliner -disable-block-placement=0 < %s | FileCheck %s ; For the Phis generated in the epilog, test that we generate the correct -; names for the values coming from the prolog stages. The test belows +; names for the values coming from the prolog stages. The test below ; checks that the value loaded in the first prolog block gets propagated ; through the first epilog to the use after the loop. 
diff --git a/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll b/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll --- a/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll +++ b/llvm/test/CodeGen/Hexagon/swp-listen-loop3.ll @@ -1,6 +1,6 @@ ; RUN: llc -march=hexagon -pipeliner-ignore-recmii -pipeliner-max-stages=2 -enable-pipeliner < %s -pipeliner-experimental-cg=true | FileCheck %s -; This is a loop we pipeline to three packets, though we could do bettter. +; This is a loop we pipeline to three packets, though we could do better. ; CHECK: loop0(.LBB0_[[LOOP:.]], ; CHECK: .LBB0_[[LOOP]]: diff --git a/llvm/test/CodeGen/M68k/Arith/mul64.ll b/llvm/test/CodeGen/M68k/Arith/mul64.ll --- a/llvm/test/CodeGen/M68k/Arith/mul64.ll +++ b/llvm/test/CodeGen/M68k/Arith/mul64.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=m68k-linux -verify-machineinstrs | FileCheck %s -; Currenlty making the libcall is ok, x20 supports i32 mul/div which +; Currently making the libcall is ok, x20 supports i32 mul/div which ; yields saner expansion for i64 mul define i64 @foo(i64 %t, i64 %u) nounwind { ; CHECK-LABEL: foo: diff --git a/llvm/test/CodeGen/M68k/CollapseMOVEM.mir b/llvm/test/CodeGen/M68k/CollapseMOVEM.mir --- a/llvm/test/CodeGen/M68k/CollapseMOVEM.mir +++ b/llvm/test/CodeGen/M68k/CollapseMOVEM.mir @@ -4,7 +4,7 @@ #------------------------------------------------------------------------------ # CollapseMOVEM pass finds sequences of MOVEM instructions and collapse them -# into a single instruciton with merged masks. This only works with stack data +# into a single instruction with merged masks. This only works with stack data #------------------------------------------------------------------------------ --- # CollapseMOVEM_RM diff --git a/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll b/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll --- a/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll +++ b/llvm/test/CodeGen/PowerPC/ppc-vaarg-agg.ll @@ -40,7 +40,7 @@ ret void } -; If the SD nodes are not cleaup up correctly, then this can fail to compile +; If the SD nodes are not cleaned up correctly, then this can fail to compile ; with an error like: Cannot select: ch = setlt [ID=6] ; CHECK: @test1 diff --git a/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll b/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll --- a/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll +++ b/llvm/test/CodeGen/PowerPC/ppc64-sibcall.ll @@ -45,7 +45,7 @@ define dso_local void @caller_64_64_copy_ccc([8 x i64] %a, [8 x i64] %b) #1 { tail call fastcc void @callee_64_64_copy_fastcc([8 x i64] %a, [8 x i64] %b) ret void -; If caller and callee use different calling convensions, we cannot apply TCO. +; If caller and callee use different calling conventions, we cannot apply TCO. 
; CHECK-SCO-LABEL: caller_64_64_copy_ccc: ; CHECK-SCO: bl callee_64_64_copy_fastcc } diff --git a/llvm/test/CodeGen/PowerPC/store_fptoi.ll b/llvm/test/CodeGen/PowerPC/store_fptoi.ll --- a/llvm/test/CodeGen/PowerPC/store_fptoi.ll +++ b/llvm/test/CodeGen/PowerPC/store_fptoi.ll @@ -5,7 +5,7 @@ ; RUN: -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK-PWR8 %s ; ========================================== -; Tests for store of fp_to_sint converstions +; Tests for store of fp_to_sint conversions ; ========================================== ; Function Attrs: norecurse nounwind @@ -589,7 +589,7 @@ } ; ========================================== -; Tests for store of fp_to_uint converstions +; Tests for store of fp_to_uint conversions ; ========================================== ; Function Attrs: norecurse nounwind diff --git a/llvm/test/CodeGen/PowerPC/test-vector-insert.ll b/llvm/test/CodeGen/PowerPC/test-vector-insert.ll --- a/llvm/test/CodeGen/PowerPC/test-vector-insert.ll +++ b/llvm/test/CodeGen/PowerPC/test-vector-insert.ll @@ -18,7 +18,7 @@ ; RUN: -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \ ; RUN: -mcpu=pwr9 < %s | FileCheck %s --check-prefix=CHECK-BE-P9 ; xscvdpsxws and xscvdpsxws is only available on Power7 and above -; Codgen is different for Power7, Power8, and Power9. +; Codegen is different for Power7, Power8, and Power9. define dso_local <4 x i32> @test(<4 x i32> %a, double %b) { ; CHECK-LE-P7-LABEL: test: diff --git a/llvm/test/CodeGen/RISCV/frm-dependency.ll b/llvm/test/CodeGen/RISCV/frm-dependency.ll --- a/llvm/test/CodeGen/RISCV/frm-dependency.ll +++ b/llvm/test/CodeGen/RISCV/frm-dependency.ll @@ -57,7 +57,7 @@ ret float %1 } -; This uses rtz instead of dyn rounding mode so shouldn't have an FRM dependncy. +; This uses rtz instead of dyn rounding mode so shouldn't have an FRM dependency. define i32 @fcvt_w_s(float %a) nounwind { ; RV32IF-LABEL: name: fcvt_w_s ; RV32IF: bb.0 (%ir-block.0): diff --git a/llvm/test/CodeGen/SystemZ/fp-const-10.ll b/llvm/test/CodeGen/SystemZ/fp-const-10.ll --- a/llvm/test/CodeGen/SystemZ/fp-const-10.ll +++ b/llvm/test/CodeGen/SystemZ/fp-const-10.ll @@ -3,7 +3,7 @@ ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s ; Test that we don't do an FP extending load, as this would result in a -; converstion to QNaN. +; conversion to QNaN. define double @f1() { ; CHECK-LABEL: .LCPI0_0 ; CHECK: .quad 0x7ff4000000000000 diff --git a/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll b/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll --- a/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll +++ b/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll @@ -1,6 +1,6 @@ ; RUN: llc < %s -O0 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 -mattr=+vfp2 ; This test creates a big stack frame without spilling any callee-saved registers. -; Make sure the whole stack frame is addrerssable wiothout scavenger crashes. +; Make sure the whole stack frame is addressable without scavenger crashes.
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32" target triple = "thumbv7-apple-darwin3.0.0-iphoneos" diff --git a/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll b/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll --- a/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll +++ b/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll @@ -4,7 +4,7 @@ define i64 @f1(i64 %a, i64 %b) { ; CHECK-LABEL: f1: ; CHECK: subs.w r0, r0, r2 -; To test dead_carry, +32bit prevents sbc conveting to 16-bit sbcs +; To test dead_carry, +32bit prevents sbc converting to 16-bit sbcs ; CHECK: sbc.w r1, r1, r3 %tmp = sub i64 %a, %b ret i64 %tmp diff --git a/llvm/test/CodeGen/WebAssembly/exception.ll b/llvm/test/CodeGen/WebAssembly/exception.ll --- a/llvm/test/CodeGen/WebAssembly/exception.ll +++ b/llvm/test/CodeGen/WebAssembly/exception.ll @@ -319,7 +319,7 @@ ret void } -; Tests a case when a cleanup region (cleanuppad ~ clanupret) contains another +; Tests a case when a cleanup region (cleanuppad ~ cleanupret) contains another ; catchpad define void @test_complex_cleanup_region() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { entry: diff --git a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll --- a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll +++ b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll @@ -4,7 +4,7 @@ ; ; It's possible to schedule this in 14 instructions by avoiding ; callee-save registers, but the scheduler isn't currently that -; conervative with registers. +; conservative with registers. @size20 = external dso_local global i32 ; [#uses=1] @in5 = external dso_local global ptr ; [#uses=1] diff --git a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll --- a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll +++ b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll @@ -10,7 +10,7 @@ ; ; Move return address (76(%esp)) to a temporary register (%ebp) ; CHECK: movl 76(%esp), [[REGISTER:%[a-z]+]] -; Overwrite return addresss +; Overwrite return address ; CHECK: movl [[EBX:%[a-z]+]], 76(%esp) ; Move return address from temporary register (%ebp) to new stack location (60(%esp)) ; CHECK: movl [[REGISTER]], 60(%esp) diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll --- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll @@ -1080,7 +1080,7 @@ } ; Test cases for . -; Instruction selection for broacast instruction fails if +; Instruction selection for broadcast instruction fails if ; the load cannot be folded into the broadcast. 
; This happens if the load has initial one use but other uses are ; created later, or if selection DAG cannot prove that folding the diff --git a/llvm/test/CodeGen/X86/bitcnt-false-dep.ll b/llvm/test/CodeGen/X86/bitcnt-false-dep.ll --- a/llvm/test/CodeGen/X86/bitcnt-false-dep.ll +++ b/llvm/test/CodeGen/X86/bitcnt-false-dep.ll @@ -86,7 +86,7 @@ ;HSW: xorl [[GPR0:%e[a-d]x]], [[GPR0]] ;HSW-NEXT: tzcntl {{.*}}, [[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_tzct32 ;SKL-NOT: xor ;SKL: tzcntl @@ -113,7 +113,7 @@ ;HSW: xorl %e[[GPR0:[a-d]x]], %e[[GPR0]] ;HSW-NEXT: tzcntq {{.*}}, %r[[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_tzct64 ;SKL-NOT: xor ;SKL: tzcntq @@ -140,7 +140,7 @@ ;HSW: xorl [[GPR0:%e[a-d]x]], [[GPR0]] ;HSW-NEXT: lzcntl {{.*}}, [[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_lzct32 ;SKL-NOT: xor ;SKL: lzcntl @@ -167,7 +167,7 @@ ;HSW: xorl %e[[GPR0:[a-d]x]], %e[[GPR0]] ;HSW-NEXT: lzcntq {{.*}}, %r[[GPR0]] -; This false dependecy issue was fixed in Skylake +; This false dependency issue was fixed in Skylake ;SKL-LABEL:@loopdep_lzct64 ;SKL-NOT: xor ;SKL: lzcntq diff --git a/llvm/test/CodeGen/X86/block-placement.ll b/llvm/test/CodeGen/X86/block-placement.ll --- a/llvm/test/CodeGen/X86/block-placement.ll +++ b/llvm/test/CodeGen/X86/block-placement.ll @@ -1496,7 +1496,7 @@ ; if it introduces extra branch. ; Specifically in this case because best exit is .header ; but it has fallthrough to .middle block and last block in -; loop chain .slow does not have afallthrough to .header. +; loop chain .slow does not have a fallthrough to .header. ; CHECK-LABEL: not_rotate_if_extra_branch ; CHECK: %.entry ; CHECK: %.header @@ -1541,7 +1541,7 @@ define i32 @not_rotate_if_extra_branch_regression(i32 %count, i32 %init) { ; This is a regression test against patch avoid loop rotation if -; it introduce an extra btanch. +; it introduces an extra branch. ; CHECK-LABEL: not_rotate_if_extra_branch_regression ; CHECK: %.entry ; CHECK: %.first_backedge diff --git a/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll b/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll --- a/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll +++ b/llvm/test/CodeGen/X86/crash-lre-eliminate-dead-def.ll @@ -25,7 +25,7 @@ ; v1 is now dead so we remove its live-range. ; Actually, we shrink it to empty to keep the ; instruction around for futher remat opportunities -; (accessbile via the origin pointer.) +; (accessible via the origin pointer.) ; ; Later v2 gets remove as well (e.g., because we ; remat it closer to its use) and the live-range diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll --- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll +++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll @@ -3,7 +3,7 @@ ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X)) -; Canolicalize the sequence shl/zext/lshr performing the zeroextend +; Canonicalize the sequence shl/zext/lshr performing the zeroextend ; as the last instruction of the sequence. ; This will help DAGCombiner to identify and then fold the sequence ; of shifts into a single AND.
diff --git a/llvm/test/CodeGen/X86/fptoui-may-overflow.ll b/llvm/test/CodeGen/X86/fptoui-may-overflow.ll --- a/llvm/test/CodeGen/X86/fptoui-may-overflow.ll +++ b/llvm/test/CodeGen/X86/fptoui-may-overflow.ll @@ -17,7 +17,7 @@ ret <16 x i8> %b } -; In @fptoui_shuffle, we must preserve the vpand for correctnesss. Only the +; In @fptoui_shuffle, we must preserve the vpand for correctness. Only the ; i8 values extracted from %s are poison. The values from the zeroinitializer ; are not. diff --git a/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll b/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll --- a/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll +++ b/llvm/test/CodeGen/X86/i386-tlscall-fastregalloc.ll @@ -18,7 +18,7 @@ ; Get p. ; CHECK-NEXT: movl _p@{{[0-9a-zA-Z]+}}, [[P_ADDR:%[a-z]+]] ; CHECK-NEXT: calll *([[P_ADDR]]) -; At this point eax contiains the address of p. +; At this point eax contains the address of p. ; Load c address. ; Make sure we do not clobber eax. ; CHECK-NEXT: movl [[C_SPILLED]], [[C_ADDR_RELOADED:%e[b-z]x+]] diff --git a/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll b/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll --- a/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll +++ b/llvm/test/CodeGen/X86/win64-funclet-preisel-intrinsics.ll @@ -4,7 +4,7 @@ ; regular function calls in the course of IR transformations. ; ; Test that the code generator will emit the function call and not consider it -; an "implausible instruciton". In the past this silently truncated code on +; an "implausible instruction". In the past this silently truncated code on ; exception paths and caused crashes at runtime. ; ; Reduced IR generated from ObjC++ source: diff --git a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll --- a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll +++ b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll @@ -4,7 +4,7 @@ ;; In functions with 'no_caller_saved_registers' attribute, all registers should ;; be preserved except for registers used for passing/returning arguments. ;; In the following function registers %rdi, %rsi and %xmm0 are used to store -;; arguments %a0, %a1 and %b0 accordingally. The value is returned in %rax. +;; arguments %a0, %a1 and %b0 accordingly. The value is returned in %rax. ;; The above registers should not be preserved, however other registers ;; (that are modified by the function) should be preserved (%rdx and %xmm1). define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 { diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll --- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll +++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll @@ -276,7 +276,7 @@ ; Check landing pad again. ; This time checks that we can shrink-wrap when the epilogue does not -; span accross several blocks. +; span across several blocks. ; ; CHECK-LABEL: with_nounwind_same_succ: ; diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll --- a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll +++ b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll @@ -1168,7 +1168,7 @@ @a = common global i32 0, align 4 ; Make sure the prologue does not clobber the EFLAGS when -; it is live accross. +; it is live across. ; PR25629. 
; Note: The registers may change in the following patterns, but ; because they imply register hierarchy (e.g., eax, al) this is diff --git a/llvm/test/DebugInfo/Generic/debug-info-enum.ll b/llvm/test/DebugInfo/Generic/debug-info-enum.ll --- a/llvm/test/DebugInfo/Generic/debug-info-enum.ll +++ b/llvm/test/DebugInfo/Generic/debug-info-enum.ll @@ -170,7 +170,7 @@ ; Test enumeration without a fixed underlying type, but with the DIFlagEnumClass ; set. The DW_AT_enum_class attribute should be absent. This behaviour is -; intented to keep compatibilty with existing DWARF consumers, which may imply +; intended to keep compatibility with existing DWARF consumers, which may imply ; the type is present whenever DW_AT_enum_class is set. !63 = !DICompositeType(tag: DW_TAG_enumeration_type, name: "E9", file: !3, line: 63, size: 32, flags: DIFlagEnumClass, elements: !64, identifier: "_ZTS2E9") !64 = !{!65, !66} diff --git a/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir b/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir --- a/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir +++ b/llvm/test/DebugInfo/MIR/X86/mlicm-hoist-pre-regalloc.mir @@ -1,7 +1,7 @@ --- | ; RUN: llc -run-pass=machinelicm -o - %s | FileCheck %s ; Line numbers should not be retained when loop invariant instructions are hoisted. - ; Doing so causes poor stepping bevavior. + ; Doing so causes poor stepping behavior. ; ; Created from: ; int x; diff --git a/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll b/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll --- a/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll +++ b/llvm/test/DebugInfo/X86/dbg-value-funcarg3.ll @@ -26,7 +26,7 @@ ; CHECK-NEXT: COPY ; CHECK-NEXT: RET ; -;; For instr-ref, no copies should be considered. Because argumenst are +;; For instr-ref, no copies should be considered. Because arguments are ;; Special, we don't label them in the same way, and currently emit a ;; DBG_VALUE for the physreg. ; INSTRREF-LABEL: name: fn1 diff --git a/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll b/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll --- a/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll +++ b/llvm/test/DebugInfo/X86/dimodule-external-fortran.ll @@ -8,7 +8,7 @@ ; real :: dummy ; end module external_module ; -; em.f90 (to compile: -g -llvm-emit -c -S em.f90) +; em.f90 (to compile: -g -llvm-emit -c -S em.f90) ; program use_external_module ; use external_module ; implicit none diff --git a/llvm/test/DebugInfo/macro_link.ll b/llvm/test/DebugInfo/macro_link.ll --- a/llvm/test/DebugInfo/macro_link.ll +++ b/llvm/test/DebugInfo/macro_link.ll @@ -1,6 +1,6 @@ ; RUN: llvm-link %s %s -S -o -| FileCheck %s -; This test checks that DIMacro and DIMacroFile comaprison works correctly. +; This test checks that DIMacro and DIMacroFile comparison works correctly.
; CHECK: !llvm.dbg.cu = !{[[CU1:![0-9]*]], [[CU2:![0-9]*]]} diff --git a/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll b/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll --- a/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll +++ b/llvm/test/Instrumentation/SanitizerCoverage/gep-tracing.ll @@ -31,7 +31,7 @@ ; CHECK: call void @__sanitizer_cov_trace_gep(i64 %idxprom) ; CHECK: ret void -; Just make sure we don't insturment this one and don't crash +; Just make sure we don't instrument this one and don't crash define void @gep_3(<2 x i8*> %a, i32 %i, i32 %j) { entry: %0 = getelementptr i8, <2 x i8*> %a, <2 x i64> diff --git a/llvm/test/MC/AArch64/arm64-branch-encoding.s b/llvm/test/MC/AArch64/arm64-branch-encoding.s --- a/llvm/test/MC/AArch64/arm64-branch-encoding.s +++ b/llvm/test/MC/AArch64/arm64-branch-encoding.s @@ -23,7 +23,7 @@ ; CHECK: fixup A - offset: 0, value: L1, kind: fixup_aarch64_pcrel_call26 ;----------------------------------------------------------------------------- -; Contitional branch instructions. +; Conditional branch instructions. ;----------------------------------------------------------------------------- b L1 diff --git a/llvm/test/MC/ARM/mul-v4.s b/llvm/test/MC/ARM/mul-v4.s --- a/llvm/test/MC/ARM/mul-v4.s +++ b/llvm/test/MC/ARM/mul-v4.s @@ -1,4 +1,4 @@ -@ PR17647: MUL/MLA/SMLAL/UMLAL should be avalaibe to IAS for ARMv4 and higher +@ PR17647: MUL/MLA/SMLAL/UMLAL should be available to IAS for ARMv4 and higher @ RUN: llvm-mc < %s -triple armv4-unknown-unknown -show-encoding | FileCheck %s --check-prefix=ARMV4 diff --git a/llvm/test/MC/AsmParser/directive_abort.s b/llvm/test/MC/AsmParser/directive_abort.s --- a/llvm/test/MC/AsmParser/directive_abort.s +++ b/llvm/test/MC/AsmParser/directive_abort.s @@ -1,6 +1,6 @@ # RUN: not llvm-mc -triple i386-unknown-unknown %s 2> %t # RUN: FileCheck -input-file %t %s -# CHECK: error: .abort 'please stop assembing' +# CHECK: error: .abort 'please stop assembling' TEST0: - .abort please stop assembing + .abort please stop assembling diff --git a/llvm/test/MC/ELF/section-sym.s b/llvm/test/MC/ELF/section-sym.s --- a/llvm/test/MC/ELF/section-sym.s +++ b/llvm/test/MC/ELF/section-sym.s @@ -7,7 +7,7 @@ // Test that the relocation points to the first section foo. 
-// The first seciton foo has index 6 +// The first section foo has index 6 // CHECK: Section { // CHECK: Index: 4 // CHECK-NEXT: Name: foo diff --git a/llvm/test/MC/ELF/section-unique-err4.s b/llvm/test/MC/ELF/section-unique-err4.s --- a/llvm/test/MC/ELF/section-unique-err4.s +++ b/llvm/test/MC/ELF/section-unique-err4.s @@ -1,5 +1,5 @@ // RUN: not llvm-mc -triple x86_64-pc-linux-gnu %s -o - 2>&1 | FileCheck %s -// CHECK: error: expected commma +// CHECK: error: expected comma .section .text,"ax",@progbits,unique 1 diff --git a/llvm/test/MC/Hexagon/capitalizedEndloop.s b/llvm/test/MC/Hexagon/capitalizedEndloop.s --- a/llvm/test/MC/Hexagon/capitalizedEndloop.s +++ b/llvm/test/MC/Hexagon/capitalizedEndloop.s @@ -1,7 +1,7 @@ # RUN: llvm-mc -triple=hexagon -filetype=obj %s | llvm-objdump -d -r - | FileCheck %s # -# Verify that capitaizled endloops work +# Verify that capitalized endloops work { R0 = mpyi(R0,R0) } : endloop0 { R0 = mpyi(R0,R0) } : ENDLOOP0 diff --git a/llvm/test/MC/Mips/mt/module-directive.s b/llvm/test/MC/Mips/mt/module-directive.s --- a/llvm/test/MC/Mips/mt/module-directive.s +++ b/llvm/test/MC/Mips/mt/module-directive.s @@ -4,7 +4,7 @@ # RUN: FileCheck --check-prefix=CHECK-ASM %s # Test that the .module directive sets the MT flag in .MIPS.abiflags when -# assembling to boject files. +# assembling to object files. # Test that the .module directive is re-emitted when expanding assembly. diff --git a/llvm/test/MC/X86/align-via-relaxation.s b/llvm/test/MC/X86/align-via-relaxation.s --- a/llvm/test/MC/X86/align-via-relaxation.s +++ b/llvm/test/MC/X86/align-via-relaxation.s @@ -44,7 +44,7 @@ foo: ret - # Check that we're not shifting aroudn the offsets of labels - doing + # Check that we're not shifting around the offsets of labels - doing # that would require a further round of relaxation # CHECK: <foo>: # CHECK: 22: eb fe jmp 0x22 diff --git a/llvm/test/Object/archive-update.test b/llvm/test/Object/archive-update.test --- a/llvm/test/Object/archive-update.test +++ b/llvm/test/Object/archive-update.test @@ -17,7 +17,7 @@ RUN: echo newer > %t/tmp.newer/evenlen RUN: touch %t/tmp.newer/evenlen -Create an achive with the newest file +Create an archive with the newest file RUN: llvm-ar rU %t/tmp.a %t/tmp.newer/evenlen RUN: llvm-ar p %t/tmp.a | FileCheck --check-prefix=NEWER %s diff --git a/llvm/test/ThinLTO/X86/guid_collision.ll b/llvm/test/ThinLTO/X86/guid_collision.ll --- a/llvm/test/ThinLTO/X86/guid_collision.ll +++ b/llvm/test/ThinLTO/X86/guid_collision.ll @@ -1,5 +1,5 @@ ; Make sure LTO succeeds even if %t.bc contains a GlobalVariable F and -; %t2.bc cointains a Function F with the same GUID. +; %t2.bc contains a Function F with the same GUID. ; ; RUN: opt -module-summary %s -o %t.bc ; RUN: opt -module-summary %p/Inputs/guid_collision.ll -o %t2.bc diff --git a/llvm/test/Transforms/ArgumentPromotion/profile.ll b/llvm/test/Transforms/ArgumentPromotion/profile.ll --- a/llvm/test/Transforms/ArgumentPromotion/profile.ll +++ b/llvm/test/Transforms/ArgumentPromotion/profile.ll @@ -2,7 +2,7 @@ ; RUN: opt -passes=argpromotion,mem2reg -S < %s | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" -; Checks if !prof metadata is corret in deadargelim. +; Checks if !prof metadata is correct in deadargelim.
define void @caller() #0 { ; CHECK-LABEL: define {{[^@]+}}@caller() { diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll b/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll --- a/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll +++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/profile.ll @@ -5,7 +5,7 @@ ; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" -; Checks if !prof metadata is corret in deadargelim. +; Checks if !prof metadata is correct in deadargelim. define void @caller() #0 { ; IS________OPM-LABEL: define {{[^@]+}}@caller() { diff --git a/llvm/test/Transforms/Attributor/dereferenceable-1.ll b/llvm/test/Transforms/Attributor/dereferenceable-1.ll --- a/llvm/test/Transforms/Attributor/dereferenceable-1.ll +++ b/llvm/test/Transforms/Attributor/dereferenceable-1.ll @@ -310,7 +310,7 @@ } ; TEST 8 -; Use Constant range in deereferenceable +; Use Constant range in dereferenceable ; void g(int *p, long long int *range){ ; int r = *range ; // [10, 99] ; fill_range(p, *range); diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll --- a/llvm/test/Transforms/Attributor/liveness.ll +++ b/llvm/test/Transforms/Attributor/liveness.ll @@ -497,7 +497,7 @@ ret i32 0 } -; TEST 6: Undefined behvior, taken from LangRef. +; TEST 6: Undefined behavior, taken from LangRef. ; FIXME: Should be able to detect undefined behavior. define void @ub(i32* %0) { diff --git a/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll b/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll --- a/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll +++ b/llvm/test/Transforms/CodeGenPrepare/AArch64/widen_switch.ll @@ -1,4 +1,4 @@ -;; AArch64 is arbitralily chosen as a 32/64-bit RISC representative to show the transform in all tests. +;; AArch64 is arbitrarily chosen as a 32/64-bit RISC representative to show the transform in all tests. ; RUN: opt < %s -codegenprepare -S -mtriple=aarch64-unknown-unknown | FileCheck %s --check-prefix=ARM64 diff --git a/llvm/test/Transforms/DeadArgElim/aggregates.ll b/llvm/test/Transforms/DeadArgElim/aggregates.ll --- a/llvm/test/Transforms/DeadArgElim/aggregates.ll +++ b/llvm/test/Transforms/DeadArgElim/aggregates.ll @@ -131,7 +131,7 @@ ret i32 %ret } -; Case 6: When considering @mid, the return instruciton has sub-value 0 +; Case 6: When considering @mid, the return instruction has sub-value 0 ; unconditionally live, but 1 only conditionally live. Since at that level we're ; applying the results to the whole of %res, this means %res is live and cannot ; be reduced. There is scope for further optimisation here (though not visible diff --git a/llvm/test/Transforms/DeadArgElim/call_profile.ll b/llvm/test/Transforms/DeadArgElim/call_profile.ll --- a/llvm/test/Transforms/DeadArgElim/call_profile.ll +++ b/llvm/test/Transforms/DeadArgElim/call_profile.ll @@ -1,6 +1,6 @@ ; RUN: opt -passes=deadargelim -S < %s | FileCheck %s -; Checks if !prof metadata is corret in deadargelim. +; Checks if !prof metadata is correct in deadargelim. 
define void @caller() #0 { ; CHECK: call void @test_vararg(), !prof ![[PROF:[0-9]]] diff --git a/llvm/test/Transforms/EarlyCSE/fence.ll b/llvm/test/Transforms/EarlyCSE/fence.ll --- a/llvm/test/Transforms/EarlyCSE/fence.ll +++ b/llvm/test/Transforms/EarlyCSE/fence.ll @@ -56,7 +56,7 @@ ret i32 %res } -; We can not dead store eliminate accross the fence. We could in +; We can not dead store eliminate across the fence. We could in ; principal reorder the second store above the fence and then DSE either ; store, but this is beyond the simple last-store DSE which EarlyCSE ; implements. diff --git a/llvm/test/Transforms/EarlyCSE/invariant.start.ll b/llvm/test/Transforms/EarlyCSE/invariant.start.ll --- a/llvm/test/Transforms/EarlyCSE/invariant.start.ll +++ b/llvm/test/Transforms/EarlyCSE/invariant.start.ll @@ -575,7 +575,7 @@ ret i32 %sub } -; Invariant load defact starts an invariant.start scope of the appropriate size +; Invariant load defacto starts an invariant.start scope of the appropriate size define i32 @test_invariant_load_scope(ptr %p) { ; NO_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope ; NO_ASSUME-SAME: (ptr [[P:%.*]]) diff --git a/llvm/test/Transforms/GlobalOpt/invariant.group.ll b/llvm/test/Transforms/GlobalOpt/invariant.group.ll --- a/llvm/test/Transforms/GlobalOpt/invariant.group.ll +++ b/llvm/test/Transforms/GlobalOpt/invariant.group.ll @@ -45,7 +45,7 @@ ; %val = load i32, i32* %ptrVal, !invariant.group !0 ; into ; %val = load i32, i32* @tmp3, !invariant.group !0 -; and then we could assume that %val and %val2 to be the same, which coud be +; and then we could assume that %val and %val2 to be the same, which could be ; false, because @changeTmp3ValAndCallBarrierInside() may change the value ; of @tmp3. define void @_not_optimizable() { diff --git a/llvm/test/Transforms/GuardWidening/posion.ll b/llvm/test/Transforms/GuardWidening/posion.ll --- a/llvm/test/Transforms/GuardWidening/posion.ll +++ b/llvm/test/Transforms/GuardWidening/posion.ll @@ -9,7 +9,7 @@ ; interaction with poison values. ; Let x incoming parameter is used for rane checks. -; Test generates 5 checks. One of them (c2) is used to get the corretness +; Test generates 5 checks. One of them (c2) is used to get the correctness ; of nuw/nsw flags for x3 and x5. Others are used in guards and represent ; the checks x + 10 u< L, x + 15 u< L, x + 20 u< L and x + 3 u< L. ; The first two checks are in the first basic block and guard widening @@ -17,7 +17,7 @@ ; When c4 and c3 are considered, number of check becomes more than two ; and combineRangeCheck consider them as profitable even if they are in ; different basic blocks. -; Accoding to algorithm of combineRangeCheck it detects that c3 and c4 +; According to algorithm of combineRangeCheck it detects that c3 and c4 ; are enough to cover c1 and c5, so it ends up with guard of c3 && c4 ; while both of them are poison at entry. This is a bug. diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll b/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll --- a/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll +++ b/llvm/test/Transforms/IndVarSimplify/lftr-dead-ivs.ll @@ -17,7 +17,7 @@ ;; In this example, the pointer IV is dynamicaly dead. As such, the fact that ;; inbounds produces poison *does not* trigger UB in the original loop. 
As ;; such, the pointer IV can be poison and adding a new use of the pointer -;; IV which dependends on that poison computation in a manner which might +;; IV which depends on that poison computation in a manner which might ;; trigger UB would be incorrect. ;; FIXME: This currently shows a miscompile! define void @neg_dynamically_dead_inbounds(i1 %always_false) #0 { diff --git a/llvm/test/Transforms/Inline/inline_call.ll b/llvm/test/Transforms/Inline/inline_call.ll --- a/llvm/test/Transforms/Inline/inline_call.ll +++ b/llvm/test/Transforms/Inline/inline_call.ll @@ -21,7 +21,7 @@ store i8* bitcast (void ()* @third to i8*), i8** %q, align 8 %tmp = call void (...)* @second(i8** %q) ; The call to 'wrapper' here is to ensure that its function attributes - ; i.e., returning its parameter and having no side effect, will be decuded + ; i.e., returning its parameter and having no side effect, will be deduced ; before the next round of inlining happens to 'top' to expose the bug. %call = call void (...)* @wrapper(void (...)* %tmp) ; The indirect call here is to confuse the alias analyzer so that diff --git a/llvm/test/Transforms/InstCombine/memchr-10.ll b/llvm/test/Transforms/InstCombine/memchr-10.ll --- a/llvm/test/Transforms/InstCombine/memchr-10.ll +++ b/llvm/test/Transforms/InstCombine/memchr-10.ll @@ -11,7 +11,7 @@ @a5 = constant [5 x i8] c"12345" -; Fold memchr(a5 + 5, c, 1) == a5 + 5 to an arbitrary constrant. +; Fold memchr(a5 + 5, c, 1) == a5 + 5 to an arbitrary constant. ; The call is transformed to a5[5] == c by the memchr simplifier, with ; a5[5] being indeterminate. The equality then is the folded with ; an undefined/arbitrary result. diff --git a/llvm/test/Transforms/InstCombine/zext.ll b/llvm/test/Transforms/InstCombine/zext.ll --- a/llvm/test/Transforms/InstCombine/zext.ll +++ b/llvm/test/Transforms/InstCombine/zext.ll @@ -124,7 +124,7 @@ ret i8 %5 } -; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded accross +; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across ; nested logical operators. define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) { diff --git a/llvm/test/Transforms/LICM/hoist-phi.ll b/llvm/test/Transforms/LICM/hoist-phi.ll --- a/llvm/test/Transforms/LICM/hoist-phi.ll +++ b/llvm/test/Transforms/LICM/hoist-phi.ll @@ -974,7 +974,7 @@ br label %loop } -; Check that we correctly handle the hoisting of %gep when theres a critical +; Check that we correctly handle the hoisting of %gep when there's a critical ; edge that branches to the preheader.
; CHECK-LABEL: @crit_edge define void @crit_edge(i32* %ptr, i32 %idx, i1 %cond1, i1 %cond2) { diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll @@ -26,7 +26,7 @@ ret void } -; Check adjiacent memory locations are properly matched and the +; Check adjacent memory locations are properly matched and the ; longest chain vectorized ; GCN-LABEL: @interleave_get_longest diff --git a/llvm/test/Transforms/LoopFusion/cannot_fuse.ll b/llvm/test/Transforms/LoopFusion/cannot_fuse.ll --- a/llvm/test/Transforms/LoopFusion/cannot_fuse.ll +++ b/llvm/test/Transforms/LoopFusion/cannot_fuse.ll @@ -76,7 +76,7 @@ ret void } -; Check that fusion detects the two canddates are not adjacent (the exit block +; Check that fusion detects the two candidates are not adjacent (the exit block ; of the first candidate is not the preheader of the second candidate). ; CHECK: Performing Loop Fusion on function non_adjacent diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll --- a/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll @@ -18,7 +18,7 @@ ; REGS: getelementptr i32, i32* %lsr.iv4, i64 1 ; LLC checks that LSR prefers less instructions to less registers. -; LSR should prefer complicated address to additonal add instructions. +; LSR should prefer complicated address to additional add instructions. ; CHECK: LBB0_2: ; CHECK-NEXT: movl (%r{{.+}}, diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll b/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll --- a/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll +++ b/llvm/test/Transforms/LoopStrengthReduce/pr50765.ll @@ -1,6 +1,6 @@ ; RUN: opt -S -loop-reduce < %s | FileCheck %s ; -;This test produces zero factor that becomes a denumerator and fails an assetion. +;This test produces zero factor that becomes a denominator and fails an assertion. target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll --- a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll +++ b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll @@ -5,7 +5,7 @@ ; 2) -unroll-dynamic-cost-savings-discount ; ; They control loop-unrolling according to the following rules: -; * If size of unrolled loop exceeds the absoulte threshold, we don't unroll +; * If size of unrolled loop exceeds the absolute threshold, we don't unroll ; this loop under any circumstances. ; * If size of unrolled loop is below the '-unroll-threshold', then we'll ; consider this loop as a very small one, and completely unroll it.
diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll --- a/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-loop-noalias-scope-decl.ll @@ -5,7 +5,7 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" -; Loop peeling must result in valid scope declartions +; Loop peeling must result in valid scope declarations define internal fastcc void @test01(i8* %p0, i8* %p1, i8* %p2) unnamed_addr align 2 { ; CHECK-LABEL: @test01( diff --git a/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll b/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll --- a/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll +++ b/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll @@ -703,7 +703,7 @@ ; Has a positive dependency between two stores. Still valid. -; The negative dependecy is in unroll-and-jam-disabled.ll +; The negative dependency is in unroll-and-jam-disabled.ll define void @test7(i32 %I, i32 %E, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 { ; CHECK-LABEL: @test7( ; CHECK-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll --- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -1321,7 +1321,7 @@ ; ; for (int i=0; i<10000; i += 16) { ; if (trigger[i] < 100) { -; A[i] = B[i*2] + trigger[i]; << non-cosecutive access +; A[i] = B[i*2] + trigger[i]; << non-consecutive access ; } ; } ;} diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll --- a/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/x86-interleaved-accesses-masked-group.ll @@ -886,7 +886,7 @@ -; Unconditioal accesses with gaps under Optsize scenario again, with unknown +; Unconditional accesses with gaps under Optsize scenario again, with unknown ; trip-count this time, in order to check the behavior of folding-the-tail ; (folding the remainder loop into the main loop using masking) together with ; interleaved-groups. Folding-the-tail turns the accesses to conditional which diff --git a/llvm/test/Transforms/LoopVectorize/skip-iterations.ll b/llvm/test/Transforms/LoopVectorize/skip-iterations.ll --- a/llvm/test/Transforms/LoopVectorize/skip-iterations.ll +++ b/llvm/test/Transforms/LoopVectorize/skip-iterations.ll @@ -8,7 +8,7 @@ ; safely speculating that the widened load of A[i] should not fault if the ; scalarized loop does not fault. For example, the ; original load in the scalar loop may not fault, but the last iteration of the -; vectorized load can fault (if it crosses a page boudary for example). +; vectorized load can fault (if it crosses a page boundary for example). ; This last vector iteration is where *one* of the ; scalar iterations lead to the early exit. diff --git a/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll b/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll --- a/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-widen-select-instruction.ll @@ -5,8 +5,8 @@ ; vectorized. 
These conditions include following: ; * Inner and outer loop invariant select condition ; * Select condition depending on outer loop iteration variable. -; * Select condidition depending on inner loop iteration variable. -; * Select conditition depending on both outer and inner loop iteration +; * Select condition depending on inner loop iteration variable. +; * Select condition depending on both outer and inner loop iteration ; variables. define void @loop_invariant_select(double* noalias nocapture %out, i1 %select, double %a, double %b) { diff --git a/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll b/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll --- a/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll +++ b/llvm/test/Transforms/LowerTypeTests/cfi-direct-call.ll @@ -48,7 +48,7 @@ ; to the actual function, not jump table ; CHECK-NEXT: call void @internal_hidden_def.cfi() -; dso_local function with defailt visibility can be short-circuited +; dso_local function with default visibility can be short-circuited ; CHECK-NEXT: call void @dsolocal_default_def.cfi() ; Local call - no action diff --git a/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll b/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll --- a/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll +++ b/llvm/test/Transforms/MergeFunc/vector-GEP-crash.ll @@ -1,5 +1,5 @@ ; RUN: opt -mergefunc -disable-output < %s -; This used to cause a crash when compairing the GEPs +; This used to cause a crash when comparing the GEPs define void @foo(<2 x i64*>) { %tmp = getelementptr i64, <2 x i64*> %0, <2 x i64> diff --git a/llvm/test/Transforms/Reassociate/xor_reassoc.ll b/llvm/test/Transforms/Reassociate/xor_reassoc.ll --- a/llvm/test/Transforms/Reassociate/xor_reassoc.ll +++ b/llvm/test/Transforms/Reassociate/xor_reassoc.ll @@ -301,7 +301,7 @@ } ; The bug was that when the compiler optimize "(x | c1)" ^ "(x & c2)", it may -; swap the two xor-subexpressions if they are not in canoninical order; however, +; swap the two xor-subexpressions if they are not in canonical order; however, ; when optimizer swaps two sub-expressions, if forgot to swap the cached value ; of c1 and c2 accordingly, hence cause the problem. ; diff --git a/llvm/test/Transforms/SROA/preserve-nonnull.ll b/llvm/test/Transforms/SROA/preserve-nonnull.ll --- a/llvm/test/Transforms/SROA/preserve-nonnull.ll +++ b/llvm/test/Transforms/SROA/preserve-nonnull.ll @@ -38,7 +38,7 @@ ; Make sure we properly handle the !nonnull attribute when we convert ; a pointer load to an integer load. -; FIXME: While this doesn't do anythnig actively harmful today, it really +; FIXME: While this doesn't do anything actively harmful today, it really ; should propagate the !nonnull metadata to range metadata. The irony is, it ; *does* initially, but then we lose that !range metadata before we finish ; SROA. diff --git a/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll b/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll --- a/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll +++ b/llvm/test/Transforms/SampleProfile/inline-mergeprof-dup.ll @@ -73,7 +73,7 @@ !15 = !DILocation(line: 6, scope: !12) -;; Check the profile of funciton sum is only merged once though the original callsite is replicted. +;; Check the profile of function sum is only merged once though the original callsite is replicated.
; CHECK: name: "sum" ; CHECK-NEXT: {!"function_entry_count", i64 46} ; CHECK: !{!"branch_weights", i32 11, i32 37} diff --git a/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll b/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll --- a/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll +++ b/llvm/test/Transforms/SimplifyCFG/X86/remove-debug.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S -hoist-common-insts=true | FileCheck %s -; TODO: Track the acutal DebugLoc of the hoisted instruction when no-line +; TODO: Track the actual DebugLoc of the hoisted instruction when no-line ; DebugLoc is supported (https://reviews.llvm.org/D24180) ; Checks if the debug info for hoisted "x = i" is removed and diff --git a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll --- a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll +++ b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll @@ -362,7 +362,7 @@ ret void } -; Test edge splitting when the default target has icmp and unconditinal +; Test edge splitting when the default target has icmp and unconditional ; branch define i1 @test9(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: @test9( diff --git a/llvm/test/Transforms/SimplifyCFG/switch_thread.ll b/llvm/test/Transforms/SimplifyCFG/switch_thread.ll --- a/llvm/test/Transforms/SimplifyCFG/switch_thread.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch_thread.ll @@ -98,7 +98,7 @@ i32 17, label %C i32 42, label %D ] -;; unreacahble. +;; unreachable. C: ; preds = %A, %A call void @DEAD( ) ret void diff --git a/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test b/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test --- a/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test +++ b/llvm/test/tools/llvm-objcopy/COFF/basic-copy.test @@ -10,7 +10,7 @@ following aspects: - The padding of executable sections (lld uses 0xcc, which is int3 on x86) - The actual layout of the string table (it can be filled linearly, - strings can be dedupliated, the table can be optimized by sharing tails + strings can be deduplicated, the table can be optimized by sharing tails of longer strings; different parts in llvm do each of these three options) - The size indication for an empty/missing string table can either be 4 or left out altogether diff --git a/llvm/test/tools/llvm-objcopy/COFF/remove-section.test b/llvm/test/tools/llvm-objcopy/COFF/remove-section.test --- a/llvm/test/tools/llvm-objcopy/COFF/remove-section.test +++ b/llvm/test/tools/llvm-objcopy/COFF/remove-section.test @@ -46,7 +46,7 @@ # # Removing the .bss section removes one symbol and its aux symbol, # and updates the section indices in symbols pointing to later -# symbols, including the aux section defintitions. +# symbols, including the aux section definitions. # # Testing that the absolute symbol @feat.00 survives the section number # mangling. diff --git a/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test b/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test --- a/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test +++ b/llvm/test/tools/llvm-readobj/ELF/call-graph-profile.test @@ -124,7 +124,7 @@ Content: "0041004200" ## '\0', 'A', '\0', 'B', '\0' Symbols: - StName: 1 ## 'A' - - StName: 0xFF ## An arbitrary currupted index in the string table. + - StName: 0xFF ## An arbitrary corrupted index in the string table. - StName: 3 ## 'B' ## Check we report a warning when a relocation section is not present. 
diff --git a/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test b/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test --- a/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test +++ b/llvm/test/tools/llvm-readobj/ELF/dynamic-reloc.test @@ -15,7 +15,7 @@ Data: ELFDATA2LSB Type: ET_DYN -## Check that we dump all possbile dynamic relocation sections. +## Check that we dump all possible dynamic relocation sections. # RUN: yaml2obj --docnum=2 %s -o %t2.1 # RUN: llvm-readobj --dyn-relocations %t2.1 2>&1 | \ # RUN: FileCheck %s --implicit-check-not=warning: --check-prefix=LLVM-RELOCS diff --git a/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test b/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test --- a/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test +++ b/llvm/test/tools/llvm-readobj/ELF/mips-options-sec.test @@ -76,7 +76,7 @@ 0xDD, 0xEE, 0xFF, 0x1E, ## ODK_REGINFO: bit-mask of used co-processor registers (2). 0x2E, 0x3E, 0x4E, 0x5E, ## ODK_REGINFO: bit-mask of used co-processor registers (3). 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, ## ODK_REGINFO: gp register value. -## A descriptor for one more arbirtary supported option. +## A descriptor for one more arbitrary supported option. 0x1, 0x28, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, diff --git a/llvm/test/tools/obj2yaml/ELF/offset.yaml b/llvm/test/tools/obj2yaml/ELF/offset.yaml --- a/llvm/test/tools/obj2yaml/ELF/offset.yaml +++ b/llvm/test/tools/obj2yaml/ELF/offset.yaml @@ -1,5 +1,5 @@ ## Check how the "Offset" field is dumped by obj2yaml. -## For each section we calulate the expected offset. +## For each section we calculate the expected offset. ## When it does not match the actual offset, we emit the "Offset" key. # RUN: yaml2obj %s -o %t1.o diff --git a/llvm/tools/llvm-ar/llvm-ar.cpp b/llvm/tools/llvm-ar/llvm-ar.cpp --- a/llvm/tools/llvm-ar/llvm-ar.cpp +++ b/llvm/tools/llvm-ar/llvm-ar.cpp @@ -1325,7 +1325,7 @@ cl::ExpandResponseFiles(Saver, getRspQuoting(makeArrayRef(argv, argc)), Argv); - // Get BitMode from enviorment variable "OBJECT_MODE" for AIX OS, if + // Get BitMode from environment variable "OBJECT_MODE" for AIX OS, if // specified. if (object::Archive::getDefaultKindForHost() == object::Archive::K_AIXBIG) { BitMode = getBitMode(getenv("OBJECT_MODE")); diff --git a/llvm/tools/llvm-cov/CoverageReport.cpp b/llvm/tools/llvm-cov/CoverageReport.cpp --- a/llvm/tools/llvm-cov/CoverageReport.cpp +++ b/llvm/tools/llvm-cov/CoverageReport.cpp @@ -108,7 +108,7 @@ OS << '-'; } -/// Return the color which correponds to the coverage percentage of a +/// Return the color which corresponds to the coverage percentage of a /// certain metric. template <typename T> raw_ostream::Colors determineCoveragePercentageColor(const T &Info) { diff --git a/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp b/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp --- a/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp +++ b/llvm/tools/llvm-dwarfutil/DebugInfoLinker.cpp @@ -276,7 +276,7 @@ DebugInfoLinker.setUpdate(!Options.DoGarbageCollection); std::vector<std::unique_ptr<DWARFFile>> ObjectsForLinking(1); - std::vector<std::unique_ptr<AddressesMap>> AddresssMapForLinking(1); + std::vector<std::unique_ptr<AddressesMap>> AddressMapForLinking(1); std::vector<std::string> EmptyWarnings; std::unique_ptr<DWARFContext> Context = DWARFContext::create(File); @@ -292,12 +292,12 @@ } // Add object files to the DWARFLinker.
- AddresssMapForLinking[0] = + AddressMapForLinking[0] = std::make_unique<ObjFileAddressMap>(*Context, Options, File); - ObjectsForLinking[0] = std::make_unique<DWARFFile>( - File.getFileName(), &*Context, AddresssMapForLinking[0].get(), - EmptyWarnings); + ObjectsForLinking[0] = + std::make_unique<DWARFFile>(File.getFileName(), &*Context, + AddressMapForLinking[0].get(), EmptyWarnings); for (size_t I = 0; I < ObjectsForLinking.size(); I++) DebugInfoLinker.addObjectFile(*ObjectsForLinking[I]); diff --git a/llvm/tools/llvm-exegesis/lib/Analysis.cpp b/llvm/tools/llvm-exegesis/lib/Analysis.cpp --- a/llvm/tools/llvm-exegesis/lib/Analysis.cpp +++ b/llvm/tools/llvm-exegesis/lib/Analysis.cpp @@ -394,7 +394,7 @@ SubtargetInfo_->getWriteLatencyEntry(RSC.SCDesc, I); OS << "<li>" << Entry->Cycles;
  • " << Entry->Cycles; if (RSC.SCDesc->NumWriteLatencyEntries > 1) { - // Dismabiguate if more than 1 latency. + // Disambiguate if more than 1 latency. OS << " (WriteResourceID " << Entry->WriteResourceID << ")"; } OS << "
  • "; diff --git a/llvm/tools/llvm-objdump/MachODump.cpp b/llvm/tools/llvm-objdump/MachODump.cpp --- a/llvm/tools/llvm-objdump/MachODump.cpp +++ b/llvm/tools/llvm-objdump/MachODump.cpp @@ -3404,7 +3404,7 @@ // These are structs in the Objective-C meta data and read to produce the // comments for disassembly. While these are part of the ABI they are no -// public defintions. So the are here not in include/llvm/BinaryFormat/MachO.h +// public definitions. So the are here not in include/llvm/BinaryFormat/MachO.h // . // The cfstring object in a 64-bit Mach-O file. diff --git a/llvm/tools/llvm-objdump/OtoolOpts.td b/llvm/tools/llvm-objdump/OtoolOpts.td --- a/llvm/tools/llvm-objdump/OtoolOpts.td +++ b/llvm/tools/llvm-objdump/OtoolOpts.td @@ -14,7 +14,7 @@ def h : Flag<["-"], "h">, HelpText<"print mach header">; def I : Flag<["-"], "I">, HelpText<"print indirect symbol table">; def j : Flag<["-"], "j">, HelpText<"print opcode bytes">; -def l : Flag<["-"], "l">, HelpText<"print load commnads">; +def l : Flag<["-"], "l">, HelpText<"print load commands">; def L : Flag<["-"], "L">, HelpText<"print used shared libraries">; def mcpu_EQ : Joined<["-"], "mcpu=">, HelpText<"select cpu for disassembly">; def o : Flag<["-"], "o">, HelpText<"print Objective-C segment">; diff --git a/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp b/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp --- a/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp +++ b/llvm/tools/llvm-pdbutil/llvm-pdbutil.cpp @@ -527,7 +527,7 @@ cl::opt DumpTypeDependents( "dependents", - cl::desc("In conjunection with -type-index and -id-index, dumps the entire " + cl::desc("In conjunction with -type-index and -id-index, dumps the entire " "dependency graph for the specified index instead of " "just the single record with the specified index"), cl::cat(TypeOptions), cl::sub(DumpSubcommand)); diff --git a/llvm/tools/llvm-profgen/PerfReader.cpp b/llvm/tools/llvm-profgen/PerfReader.cpp --- a/llvm/tools/llvm-profgen/PerfReader.cpp +++ b/llvm/tools/llvm-profgen/PerfReader.cpp @@ -1170,7 +1170,7 @@ emitWarningSummary( BogusRange, TotalRangeNum, "of samples are from ranges that have range start after or too far from " - "range end acrossing the unconditinal jmp."); + "range end across the unconditional jmp."); } void PerfScriptReader::parsePerfTraces() { diff --git a/llvm/tools/llvm-profgen/ProfiledBinary.h b/llvm/tools/llvm-profgen/ProfiledBinary.h --- a/llvm/tools/llvm-profgen/ProfiledBinary.h +++ b/llvm/tools/llvm-profgen/ProfiledBinary.h @@ -149,7 +149,7 @@ // size with the best matching context, which is used to help pre-inliner use // accurate post-optimization size to make decisions. // TODO: If an inlinee is completely optimized away, ideally we should have zero -// for its context size, currently we would misss such context since it doesn't +// for its context size, currently we would miss such context since it doesn't // have instructions. To fix this, we need to mark all inlinee with entry probe // but without instructions as having zero size. class BinarySizeContextTracker { diff --git a/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp b/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp --- a/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp +++ b/llvm/tools/llvm-symbolizer/llvm-symbolizer.cpp @@ -289,7 +289,7 @@ SymbolizerOptTable &Tbl) { StringRef ToolName = IsAddr2Line ? "llvm-addr2line" : "llvm-symbolizer"; // The environment variable specifies initial options which can be overridden - // by commnad line options. + // by command line options. 
diff --git a/llvm/tools/llvm-xray/xray-account.cpp b/llvm/tools/llvm-xray/xray-account.cpp
--- a/llvm/tools/llvm-xray/xray-account.cpp
+++ b/llvm/tools/llvm-xray/xray-account.cpp
@@ -78,7 +78,7 @@
     "sort", cl::desc("sort output by this field"), cl::value_desc("field"),
     cl::sub(Account), cl::init(SortField::FUNCID),
     cl::values(clEnumValN(SortField::FUNCID, "funcid", "function id"),
-               clEnumValN(SortField::COUNT, "count", "funciton call counts"),
+               clEnumValN(SortField::COUNT, "count", "function call counts"),
                clEnumValN(SortField::MIN, "min", "minimum function durations"),
                clEnumValN(SortField::MED, "med", "median function durations"),
                clEnumValN(SortField::PCT90, "90p", "90th percentile durations"),
diff --git a/llvm/tools/llvm-xray/xray-graph.cpp b/llvm/tools/llvm-xray/xray-graph.cpp
--- a/llvm/tools/llvm-xray/xray-graph.cpp
+++ b/llvm/tools/llvm-xray/xray-graph.cpp
@@ -198,7 +198,7 @@
 // example caused by tail call elimination and if the option is enabled then
 // then tries to recover from this.
 //
-// This funciton will also error if the records are out of order, as the trace
+// This function will also error if the records are out of order, as the trace
 // is expected to be sorted.
 //
 // The graph generated has an immaginary root for functions called by no-one at
diff --git a/llvm/unittests/ADT/FallibleIteratorTest.cpp b/llvm/unittests/ADT/FallibleIteratorTest.cpp
--- a/llvm/unittests/ADT/FallibleIteratorTest.cpp
+++ b/llvm/unittests/ADT/FallibleIteratorTest.cpp
@@ -68,7 +68,7 @@
   friend bool operator==(const FallibleCollectionWalker &LHS,
                          const FallibleCollectionWalker &RHS) {
-    assert(&LHS.C == &RHS.C && "Comparing iterators across collectionss.");
+    assert(&LHS.C == &RHS.C && "Comparing iterators across collections.");
     return LHS.Idx == RHS.Idx;
   }
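The assertion fixed in the FallibleIteratorTest hunk above shows a useful idiom: two iterators are only comparable when they walk the same underlying container, so operator== checks container identity by address before comparing positions. A self-contained sketch of the idiom with illustrative types (Walker is not the test's actual class):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Illustrative iterator-like walker; comparing walkers from different
    // containers is a logic error, caught by the assert.
    struct Walker {
      const std::vector<int> *C;
      std::size_t Idx;
      friend bool operator==(const Walker &LHS, const Walker &RHS) {
        assert(LHS.C == RHS.C && "Comparing iterators across collections.");
        return LHS.Idx == RHS.Idx;
      }
    };

    int main() {
      std::vector<int> V{1, 2, 3};
      Walker A{&V, 0}, B{&V, 0};
      assert(A == B); // same container, same index
    }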
diff --git a/llvm/unittests/ADT/SequenceTest.cpp b/llvm/unittests/ADT/SequenceTest.cpp
--- a/llvm/unittests/ADT/SequenceTest.cpp
+++ b/llvm/unittests/ADT/SequenceTest.cpp
@@ -1,4 +1,4 @@
-//===- SequenceTest.cpp - Unit tests for a sequence abstraciton -----------===//
+//===- SequenceTest.cpp - Unit tests for a sequence abstraction -----------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -102,7 +102,7 @@
   MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
   EXPECT_NE(MP, nullptr);

-  // Create the load memory acccess
+  // Create the load memory access
   MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
       LoadInst, MP, Merge, MemorySSA::Beginning));
   MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
@@ -238,7 +238,7 @@
   B.SetInsertPoint(Merge, Merge->begin());
   LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);

-  // Create the load memory acccess
+  // Create the load memory access
   MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
       LoadInst, nullptr, Merge, MemorySSA::Beginning));
   Updater.insertUse(LoadAccess);
@@ -905,7 +905,7 @@
   setupAnalyses();
   MemorySSA &MSSA = *Analyses->MSSA;
   MemorySSAUpdater Updater(&MSSA);
-  // Create the load memory acccess
+  // Create the load memory access
   LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
   MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
       LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
diff --git a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
--- a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
@@ -74,7 +74,7 @@
 }

 TEST_F(CoreAPIsStandardTest, ResolveUnrequestedSymbol) {
-  // Test that all symbols in a MaterializationUnit materialize corretly when
+  // Test that all symbols in a MaterializationUnit materialize correctly when
   // only a subset of symbols is looked up.
   // The aim here is to ensure that we're not relying on the query to set up
   // state needed to materialize the unrequested symbols.
@@ -869,7 +869,7 @@
 }

 TEST_F(CoreAPIsStandardTest, FailMaterializerWithUnqueriedSymbols) {
-  // Make sure that symbols with no queries aganist them still
+  // Make sure that symbols with no queries against them still
   // fail correctly.

   bool MaterializerRun = false;
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -4098,7 +4098,7 @@
   EXPECT_NE(LockVar, nullptr);

   // Find the allocation of a local array that will be used to call the runtime
-  // reduciton function.
+  // reduction function.
   BasicBlock &AllocBlock = Outlined->getEntryBlock();
   Value *LocalArray = nullptr;
   for (Instruction &I : AllocBlock) {
diff --git a/llvm/utils/TableGen/CodeGenSchedule.h b/llvm/utils/TableGen/CodeGenSchedule.h
--- a/llvm/utils/TableGen/CodeGenSchedule.h
+++ b/llvm/utils/TableGen/CodeGenSchedule.h
@@ -429,7 +429,7 @@
   // List of unique SchedClasses.
   std::vector<CodeGenSchedClass> SchedClasses;

-  // Any inferred SchedClass has an index greater than NumInstrSchedClassses.
+  // Any inferred SchedClass has an index greater than NumInstrSchedClasses.
   unsigned NumInstrSchedClasses;

   RecVec ProcResourceDefs;
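The corrected comment in CodeGenSchedule.h encodes an indexing invariant: sched classes built directly from instructions occupy the low indices, and inferred classes are appended after them. A hypothetical helper making that invariant explicit (this exact function is not part of the patch, and it treats "greater than" as "at or beyond the instruction-class count"):

    #include <cassert>

    bool isInferredSchedClass(unsigned SCIdx, unsigned NumInstrSchedClasses) {
      // Instruction-defined classes occupy [0, NumInstrSchedClasses);
      // inferred classes are appended after them.
      return SCIdx >= NumInstrSchedClasses;
    }

    int main() {
      assert(!isInferredSchedClass(3, 10)); // an instruction-defined class
      assert(isInferredSchedClass(10, 10)); // the first inferred class
    }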
diff --git a/llvm/utils/TableGen/DAGISelMatcher.h b/llvm/utils/TableGen/DAGISelMatcher.h
--- a/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/llvm/utils/TableGen/DAGISelMatcher.h
@@ -46,28 +46,28 @@
 public:
   enum KindTy {
     // Matcher state manipulation.
-    Scope,                // Push a checking scope.
-    RecordNode,           // Record the current node.
-    RecordChild,          // Record a child of the current node.
-    RecordMemRef,         // Record the memref in the current node.
-    CaptureGlueInput,     // If the current node has an input glue, save it.
-    MoveChild,            // Move current node to specified child.
-    MoveParent,           // Move current node to parent.
+    Scope,            // Push a checking scope.
+    RecordNode,       // Record the current node.
+    RecordChild,      // Record a child of the current node.
+    RecordMemRef,     // Record the memref in the current node.
+    CaptureGlueInput, // If the current node has an input glue, save it.
+    MoveChild,        // Move current node to specified child.
+    MoveParent,       // Move current node to parent.

     // Predicate checking.
-    CheckSame,            // Fail if not same as prev match.
-    CheckChildSame,       // Fail if child not same as prev match.
+    CheckSame,      // Fail if not same as prev match.
+    CheckChildSame, // Fail if child not same as prev match.
     CheckPatternPredicate,
-    CheckPredicate,       // Fail if node predicate fails.
-    CheckOpcode,          // Fail if not opcode.
-    SwitchOpcode,         // Dispatch based on opcode.
-    CheckType,            // Fail if not correct type.
-    SwitchType,           // Dispatch based on type.
-    CheckChildType,       // Fail if child has wrong type.
-    CheckInteger,         // Fail if wrong val.
-    CheckChildInteger,    // Fail if child is wrong val.
-    CheckCondCode,        // Fail if not condcode.
-    CheckChild2CondCode,  // Fail if child is wrong condcode.
+    CheckPredicate,      // Fail if node predicate fails.
+    CheckOpcode,         // Fail if not opcode.
+    SwitchOpcode,        // Dispatch based on opcode.
+    CheckType,           // Fail if not correct type.
+    SwitchType,          // Dispatch based on type.
+    CheckChildType,      // Fail if child has wrong type.
+    CheckInteger,        // Fail if wrong val.
+    CheckChildInteger,   // Fail if child is wrong val.
+    CheckCondCode,       // Fail if not condcode.
+    CheckChild2CondCode, // Fail if child is wrong condcode.
     CheckValueType,
     CheckComplexPat,
     CheckAndImm,
@@ -76,7 +76,7 @@
     CheckImmAllZerosV,
     CheckFoldableChainNode,

-    // Node creation/emisssion.
+    // Node creation/emission.
     EmitInteger,        // Create a TargetConstant
     EmitStringInteger,  // Create a TargetConstant from a string.
     EmitRegister,       // Create a register.
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -3493,7 +3493,7 @@
 bool OperandPredicateMatcher::isHigherPriorityThan(
     const OperandPredicateMatcher &B) const {
   // Generally speaking, an instruction is more important than an Int or a
-  // LiteralInt because it can cover more nodes but theres an exception to
+  // LiteralInt because it can cover more nodes but there's an exception to
   // this. G_CONSTANT's are less important than either of those two because they
   // are more permissive.

@@ -5196,7 +5196,7 @@
   // naming being the same. One possible solution would be to have
   // explicit operator for operation capture and reference those.
   // The plus side is that it would expose opportunities to share
-  // the capture accross rules. The downside is that it would
+  // the capture across rules. The downside is that it would
   // introduce a dependency between predicates (captures must happen
   // before their first use.)
   InstructionMatcher &InsnMatcherTemp = M.addInstructionMatcher(Src->getName());
diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -599,7 +599,7 @@
       else:
         # This means a previous RUN line produced a body for this function
         # that is different from the one produced by this current RUN line,
-        # so the body can't be common accross RUN lines. We use None to
+        # so the body can't be common across RUN lines. We use None to
         # indicate that.
         self._func_dict[prefix][func] = None
     else:
diff --git a/llvm/utils/docker/build_docker_image.sh b/llvm/utils/docker/build_docker_image.sh
--- a/llvm/utils/docker/build_docker_image.sh
+++ b/llvm/utils/docker/build_docker_image.sh
@@ -58,7 +58,7 @@
 clang.
   mydocker/clang-debian10:latest - a small image with preinstalled clang.
 Please note that this example produces a not very useful installation, since it
-doesn't override CMake defaults, which produces a Debug and non-boostrapped
+doesn't override CMake defaults, which produces a Debug and non-bootstrapped
 version of clang.

 To get a 2-stage clang build, you could use this command:
diff --git a/llvm/utils/git/github-automation.py b/llvm/utils/git/github-automation.py
--- a/llvm/utils/git/github-automation.py
+++ b/llvm/utils/git/github-automation.py
@@ -410,7 +410,7 @@
     return False

 parser = argparse.ArgumentParser()
-parser.add_argument('--token', type=str, required=True, help='GitHub authentiation token')
+parser.add_argument('--token', type=str, required=True, help='GitHub authentication token')
 parser.add_argument('--repo', type=str, default=os.getenv('GITHUB_REPOSITORY', 'llvm/llvm-project'),
                     help='The GitHub repository that we are working with in the form of <owner>/<repo> (e.g. llvm/llvm-project)')
 subparsers = parser.add_subparsers(dest='command')
@@ -420,7 +420,7 @@
 issue_subscriber_parser.add_argument('--issue-number', type=int, required=True)

 release_workflow_parser = subparsers.add_parser('release-workflow')
-release_workflow_parser.add_argument('--llvm-project-dir', type=str, default='.', help='directory containing the llvm-project checout')
+release_workflow_parser.add_argument('--llvm-project-dir', type=str, default='.', help='directory containing the llvm-project checkout')
 release_workflow_parser.add_argument('--issue-number', type=int, required=True, help='The issue number to update')
 release_workflow_parser.add_argument('--phab-token', type=str, help='Phabricator conduit API token. See https://reviews.llvm.org/settings/user/<USER>/page/apitokens/')
 release_workflow_parser.add_argument('--branch-repo-token', type=str,
diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn
--- a/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/tools/lldb-test/BUILD.gn
@@ -19,7 +19,7 @@
     "//llvm/lib/Support",
   ]

-  # XXX commment
+  # XXX comment
   include_dirs = [ "//lldb/source" ]
   sources = [
     "FormatUtil.cpp",
diff --git a/llvm/utils/lit/lit/ProgressBar.py b/llvm/utils/lit/lit/ProgressBar.py
--- a/llvm/utils/lit/lit/ProgressBar.py
+++ b/llvm/utils/lit/lit/ProgressBar.py
@@ -38,7 +38,7 @@
         >>> term = TerminalController()
         >>> if term.CLEAR_SCREEN:
-        ...     print('This terminal supports clearning the screen.')
+        ...     print('This terminal supports clearing the screen.')

     Finally, if the width and height of the terminal are known, then
     they will be stored in the `COLS` and `LINES` attributes.
diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py
--- a/llvm/utils/lit/lit/TestRunner.py
+++ b/llvm/utils/lit/lit/TestRunner.py
@@ -50,7 +50,7 @@
 # during expansion.
 #
 # COMMAND that follows %dbg(ARG) is also captured. COMMAND can be
-# empty as a result of conditinal substitution.
+# empty as a result of conditional substitution.
 kPdbgRegex = '%dbg\\(([^)\'"]*)\\)(.*)'

 class ShellEnvironment(object):
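The kPdbgRegex line retained in that last hunk is easier to read with an example: group 1 captures the %dbg(...) argument and group 2 the command that follows, which may indeed be empty. A transliteration of the same pattern into std::regex, for illustration only (lit itself uses Python's re module):

    #include <cassert>
    #include <regex>
    #include <string>

    int main() {
      // Same shape as lit's kPdbgRegex: group 1 is the %dbg(...) argument,
      // group 2 is the (possibly empty) command that follows.
      std::regex PdbgRe("%dbg\\(([^)'\"]*)\\)(.*)");
      std::smatch M;

      std::string WithCommand = "%dbg(NOTE) echo hello";
      assert(std::regex_match(WithCommand, M, PdbgRe));
      assert(M[1] == "NOTE");
      assert(M[2] == " echo hello");

      // The command can be empty, e.g. after conditional substitution
      // removed it; the pattern still matches.
      std::string Empty = "%dbg(NOTE)";
      assert(std::regex_match(Empty, M, PdbgRe));
      assert(M[2] == "");
    }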
diff --git a/llvm/utils/unicode-case-fold.py b/llvm/utils/unicode-case-fold.py
--- a/llvm/utils/unicode-case-fold.py
+++ b/llvm/utils/unicode-case-fold.py
@@ -62,7 +62,7 @@
 # b is a list of mappings. All the mappings are assumed to have the same
-# shift and the stride between adjecant mappings (if any) is constant.
+# shift and the stride between adjacent mappings (if any) is constant.
 def dump_block(b):
     global body
diff --git a/llvm/utils/update_analyze_test_checks.py b/llvm/utils/update_analyze_test_checks.py
--- a/llvm/utils/update_analyze_test_checks.py
+++ b/llvm/utils/update_analyze_test_checks.py
@@ -26,7 +26,7 @@
 A common pattern is to have the script insert complete checking of every
 instruction. Then, edit it down to only check the relevant instructions. The
 script is designed to make adding checks to a test case fast, it is *not*
-designed to be authoratitive about what constitutes a good test!
+designed to be authoritative about what constitutes a good test!
 """

 from __future__ import print_function
diff --git a/llvm/utils/update_cc_test_checks.py b/llvm/utils/update_cc_test_checks.py
--- a/llvm/utils/update_cc_test_checks.py
+++ b/llvm/utils/update_cc_test_checks.py
@@ -371,7 +371,7 @@
       m = common.CHECK_RE.match(line)
       if m and m.group(1) in prefix_set:
         continue  # Don't append the existing CHECK lines
-      # Skip special separator comments added by commmon.add_global_checks.
+      # Skip special separator comments added by common.add_global_checks.
       if line.strip() == '//' + common.SEPARATOR:
         continue
      if idx in line2func_list:
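The shift/stride comment fixed in unicode-case-fold.py is easiest to see on ASCII: 'A'..'Z' all fold with the same shift (+32) and a stride of 1 between adjacent mappings, while blocks such as Latin Extended-A (U+0100 Ā → U+0101 ā, U+0102 Ă → U+0103 ă, ...) fold with stride 2. A sketch of the block shape such mappings compress into; the struct layout is illustrative, not the script's exact output format:

    #include <cassert>
    #include <cstdint>

    // Illustrative block: every mapping shares one shift, and adjacent
    // mapped code points are a constant stride apart.
    struct FoldBlock {
      uint32_t Begin, End; // inclusive range of mapped code points
      uint32_t Stride;     // distance between adjacent mapped code points
      int32_t Shift;       // common shift applied to every mapping
    };

    uint32_t foldWithBlock(uint32_t C, const FoldBlock &B) {
      if (C < B.Begin || C > B.End || (C - B.Begin) % B.Stride != 0)
        return C; // not covered by this block
      return C + B.Shift;
    }

    int main() {
      FoldBlock Upper{'A', 'Z', 1, 32};
      assert(foldWithBlock('G', Upper) == 'g');
      assert(foldWithBlock('g', Upper) == 'g'); // out of range, unchanged
    }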