diff --git a/llvm/bindings/ocaml/llvm/llvm.mli b/llvm/bindings/ocaml/llvm/llvm.mli --- a/llvm/bindings/ocaml/llvm/llvm.mli +++ b/llvm/bindings/ocaml/llvm/llvm.mli @@ -1295,7 +1295,7 @@ val const_extractvalue : llvalue -> int array -> llvalue (** [const_insertvalue agg val idxs] inserts the value [val] in the specified - indexs [idxs] in the aggegate [agg]. Each [idxs] must be less than the size + indices [idxs] in the aggregate [agg]. Each [idxs] must be less than the size of the aggregate. See the method [llvm::ConstantExpr::getInsertValue]. *) val const_insertvalue : llvalue -> llvalue -> int array -> llvalue diff --git a/llvm/include/llvm/ADT/PointerUnion.h b/llvm/include/llvm/ADT/PointerUnion.h --- a/llvm/include/llvm/ADT/PointerUnion.h +++ b/llvm/include/llvm/ADT/PointerUnion.h @@ -93,7 +93,7 @@ static constexpr int NumLowBitsAvailable = lowBitsAvailable(); }; - /// Implement assigment in terms of construction. + /// Implement assignment in terms of construction. template struct AssignableFrom { Derived &operator=(T t) { return static_cast(*this) = Derived(t); diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h --- a/llvm/include/llvm/Analysis/ValueTracking.h +++ b/llvm/include/llvm/Analysis/ValueTracking.h @@ -232,9 +232,9 @@ /// return undef. Value *isBytewiseValue(Value *V, const DataLayout &DL); - /// Given an aggregrate and an sequence of indices, see if the scalar value + /// Given an aggregate and a sequence of indices, see if the scalar value /// indexed is already around as a register, for example if it were inserted - /// directly into the aggregrate. + /// directly into the aggregate. /// /// If InsertBefore is not null, this function will duplicate (modified) /// insertvalues when a part of a nested struct is extracted. 
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h --- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h @@ -107,7 +107,7 @@ /// make these decisions: function formal arguments, call /// instruction args, call instruction returns and function /// returns. However, once a decision has been made on where an - /// arugment should go, exactly what happens can vary slightly. This + /// argument should go, exactly what happens can vary slightly. This /// class abstracts the differences. struct ValueHandler { ValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, diff --git a/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h b/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h --- a/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h +++ b/llvm/include/llvm/DebugInfo/GSYM/FunctionInfo.h @@ -32,7 +32,7 @@ /// The function information gets the function start address as an argument /// to the FunctionInfo::decode(...) function. This information is calculated /// from the GSYM header and an address offset from the GSYM address offsets -/// table. The encoded FunctionInfo information must be alinged to a 4 byte +/// table. The encoded FunctionInfo information must be aligned to a 4 byte /// boundary. /// /// The encoded data for a FunctionInfo starts with fixed data that all diff --git a/llvm/include/llvm/DebugInfo/GSYM/GsymCreator.h b/llvm/include/llvm/DebugInfo/GSYM/GsymCreator.h --- a/llvm/include/llvm/DebugInfo/GSYM/GsymCreator.h +++ b/llvm/include/llvm/DebugInfo/GSYM/GsymCreator.h @@ -82,15 +82,15 @@ /// The resulting GSYM size is smaller and causes fewer pages to be touched /// during address lookups when the address table is smaller. The size of the /// address offsets in the address table is specified in the header in -/// Header.AddrOffSize. 
The first offset in the address table is alinged to -/// Header.AddrOffSize alignement to ensure efficient access when loaded into +/// Header.AddrOffSize. The first offset in the address table is aligned to +/// Header.AddrOffSize alignment to ensure efficient access when loaded into /// memory. /// /// FUNCTION INFO OFFSETS TABLE /// /// The function info offsets table immediately follows the address table and /// consists of Header.NumAddresses 32 bit file offsets: one for each address -/// in the address table. This data is algined to a 4 byte boundary. The +/// in the address table. This data is aligned to a 4 byte boundary. The /// offsets in this table are the relative offsets from the start offset of the /// GSYM header and point to the function info data for each address in the /// address table. Keeping this data separate from the address table helps to diff --git a/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h b/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h --- a/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h +++ b/llvm/include/llvm/ExecutionEngine/Orc/OrcABISupport.h @@ -29,7 +29,7 @@ /// Generic ORC ABI support. /// -/// This class can be substituted as the target architecure support class for +/// This class can be substituted as the target architecture support class for /// ORC templates that require one (e.g. IndirectStubsManagers). It does not /// support lazy JITing however, and any attempt to use that functionality /// will result in execution of an llvm_unreachable. diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h --- a/llvm/include/llvm/IR/InstrTypes.h +++ b/llvm/include/llvm/IR/InstrTypes.h @@ -1947,7 +1947,7 @@ /// Is the function attribute S disallowed by some operand bundle on /// this operand bundle user? 
bool isFnAttrDisallowedByOpBundle(StringRef S) const { - // Operand bundles only possibly disallow readnone, readonly and argmenonly + // Operand bundles only possibly disallow readnone, readonly and argmemonly // attributes. All String attributes are fine. return false; } diff --git a/llvm/include/llvm/Support/Allocator.h b/llvm/include/llvm/Support/Allocator.h --- a/llvm/include/llvm/Support/Allocator.h +++ b/llvm/include/llvm/Support/Allocator.h @@ -269,7 +269,7 @@ inline LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * Allocate(size_t Size, size_t Alignment) { - assert(Alignment > 0 && "0-byte alignnment is not allowed. Use 1 instead."); + assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead."); return Allocate(Size, Align(Alignment)); } diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td --- a/llvm/include/llvm/Target/Target.td +++ b/llvm/include/llvm/Target/Target.td @@ -1292,7 +1292,7 @@ // ReportMultipleNearMisses - // When 0, the assembly matcher reports an error for one encoding or operand // that did not match the parsed instruction. - // When 1, the assmebly matcher returns a list of encodings that were close + // When 1, the assembly matcher returns a list of encodings that were close // to matching the parsed instruction, so to allow more detailed error // messages. bit ReportMultipleNearMisses = 0; diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h --- a/llvm/include/llvm/Transforms/IPO/Attributor.h +++ b/llvm/include/llvm/Transforms/IPO/Attributor.h @@ -283,7 +283,7 @@ Argument *getAssociatedArgument() const; /// Return true if the position refers to a function interface, that is the - /// function scope, the function return, or an argumnt. + /// function scope, the function return, or an argument. 
bool isFnInterfaceKind() const { switch (getPositionKind()) { case IRPosition::IRP_FUNCTION: @@ -510,7 +510,7 @@ /// - the argument of the callee (IRP_ARGUMENT), if known /// - the callee (IRP_FUNCTION), if known /// - the position the call site argument is associated with if it is not -/// anchored to the call site, e.g., if it is an arugment then the argument +/// anchored to the call site, e.g., if it is an argument then the argument /// (IRP_ARGUMENT) class SubsumingPositionIterator { SmallVector IRPositions; @@ -2170,7 +2170,7 @@ /// Return assumed alignment. unsigned getAssumedAlign() const { return getAssumed(); } - /// Return known alignemnt. + /// Return known alignment. unsigned getKnownAlign() const { return getKnown(); } /// Create an abstract attribute view for the position \p IRP. diff --git a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp --- a/llvm/lib/Bitcode/Reader/MetadataLoader.cpp +++ b/llvm/lib/Bitcode/Reader/MetadataLoader.cpp @@ -411,7 +411,7 @@ } } -} // anonynous namespace +} // anonymous namespace static Error error(const Twine &Message) { return make_error( diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp --- a/llvm/lib/CodeGen/Analysis.cpp +++ b/llvm/lib/CodeGen/Analysis.cpp @@ -262,7 +262,7 @@ /// Look through operations that will be free to find the earliest source of /// this value. /// -/// @param ValLoc If V has aggegate type, we will be interested in a particular +/// @param ValLoc If V has aggregate type, we will be interested in a particular /// scalar component. This records its address; the reverse of this list gives a /// sequence of indices appropriate for an extractvalue to locate the important /// value. 
This value is updated during the function and on exit will indicate diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -620,7 +620,7 @@ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); SDValue ExtPassThru = GetPromotedInteger(N->getPassThru()); assert(NVT == ExtPassThru.getValueType() && - "Gather result type and the passThru agrument type should be the same"); + "Gather result type and the passThru argument type should be the same"); SDLoc dl(N); SDValue Ops[] = {N->getChain(), ExtPassThru, N->getMask(), N->getBasePtr(), diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4428,7 +4428,7 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) { SDLoc sdl = getCurSDLoc(); - // llvm.masked.scatter.*(Src0, Ptrs, alignemt, Mask) + // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask) const Value *Ptr = I.getArgOperand(1); SDValue Src0 = getValue(I.getArgOperand(0)); SDValue Mask = getValue(I.getArgOperand(3)); diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp --- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -1287,7 +1287,7 @@ bool Commuted = tryInstructionCommute(&MI, DstIdx, SrcIdx, regBKilled, Dist); // If the instruction is convertible to 3 Addr, instead - // of returning try 3 Addr transformation aggresively and + // of returning try 3 Addr transformation aggressively and // use this variable to check later. Because it might be better. 
// For example, we can just use `leal (%rsi,%rdi), %eax` and `ret` // instead of the following code. diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp --- a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp +++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp @@ -39,7 +39,7 @@ using ContentDescriptors = SmallVector; -} // end anonmyous namespace +} // end anonymous namespace void DWARFDebugLine::ContentTypeTracker::trackContentType( dwarf::LineNumberEntryFormat ContentType) { diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp --- a/llvm/lib/MC/MCAssembler.cpp +++ b/llvm/lib/MC/MCAssembler.cpp @@ -951,7 +951,7 @@ /// /// \param StartAddr start address of the fused/unfused branch. /// \param Size size of the fused/unfused branch. -/// \param BoundaryAlignment aligment requirement of the branch. +/// \param BoundaryAlignment alignment requirement of the branch. /// \returns true if the branch cross the boundary. static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size, Align BoundaryAlignment) { @@ -964,7 +964,7 @@ /// /// \param StartAddr start address of the fused/unfused branch. /// \param Size size of the fused/unfused branch. -/// \param BoundaryAlignment aligment requirement of the branch. +/// \param BoundaryAlignment alignment requirement of the branch. /// \returns true if the branch is against the boundary. static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size, Align BoundaryAlignment) { @@ -976,7 +976,7 @@ /// /// \param StartAddr start address of the fused/unfused branch. /// \param Size size of the fused/unfused branch. -/// \param BoundaryAlignment aligment requirement of the branch. +/// \param BoundaryAlignment alignment requirement of the branch. /// \returns true if the branch needs padding. 
static bool needPadding(uint64_t StartAddr, uint64_t Size, Align BoundaryAlignment) { diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp --- a/llvm/lib/MC/MCExpr.cpp +++ b/llvm/lib/MC/MCExpr.cpp @@ -601,7 +601,7 @@ /// and /// Result = (LHS_A - LHS_B + LHS_Cst) + (RHS_A - RHS_B + RHS_Cst). /// -/// This routine attempts to aggresively fold the operands such that the result +/// This routine attempts to aggressively fold the operands such that the result /// is representable in an MCValue, but may not always succeed. /// /// \returns True on success, false if the result is not representable in an diff --git a/llvm/lib/MC/MCParser/COFFAsmParser.cpp b/llvm/lib/MC/MCParser/COFFAsmParser.cpp --- a/llvm/lib/MC/MCParser/COFFAsmParser.cpp +++ b/llvm/lib/MC/MCParser/COFFAsmParser.cpp @@ -144,7 +144,7 @@ COFFAsmParser() = default; }; -} // end annonomous namespace. +} // end anonymous namespace. static SectionKind computeSectionKind(unsigned Flags) { if (Flags & COFF::IMAGE_SCN_MEM_EXECUTE) diff --git a/llvm/lib/ProfileData/GCOV.cpp b/llvm/lib/ProfileData/GCOV.cpp --- a/llvm/lib/ProfileData/GCOV.cpp +++ b/llvm/lib/ProfileData/GCOV.cpp @@ -439,7 +439,7 @@ //===----------------------------------------------------------------------===// // Cycles detection // -// The algorithm in GCC is based on the algorihtm by Hawick & James: +// The algorithm in GCC is based on the algorithm by Hawick & James: // "Enumerating Circuits and Loops in Graphs with Self-Arcs and Multiple-Arcs" // http://complexity.massey.ac.nz/cstn/013/cstn-013.pdf. diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp @@ -1000,7 +1000,7 @@ 0)); // Finally we can copy the returned value back into its virtual-register. 
In - // symmetry with the arugments, the physical register must be an + // symmetry with the arguments, the physical register must be an // implicit-define of the call instruction. if (!Info.OrigRet.Ty->isVoidTy()) { CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv); diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td --- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.td +++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.td @@ -481,7 +481,7 @@ // Vector operand versions of the FP registers. Alternate name printing and -// assmebler matching. +// assembler matching. def VectorReg64AsmOperand : AsmOperandClass { let Name = "VectorReg64"; let PredicateMethod = "isNeonVectorReg"; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -1,4 +1,4 @@ -//===-- AMDGPUAsmPrinter.cpp - AMDGPU assembly printer -------------------===// +//===-- AMDGPUAsmPrinter.cpp - AMDGPU assembly printer --------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp @@ -1,4 +1,4 @@ -//===-- R600AsmPrinter.cpp - R600 Assebly printer ------------------------===// +//===-- R600AsmPrinter.cpp - R600 Assembly printer ------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.cpp b/llvm/lib/Target/ARM/ARMCallingConv.cpp --- a/llvm/lib/Target/ARM/ARMCallingConv.cpp +++ b/llvm/lib/Target/ARM/ARMCallingConv.cpp @@ -181,7 +181,7 @@ assert(PendingMembers[0].getLocVT() == LocVT); // Add the argument to the list to be allocated once we know the size of the - // aggregate. Store the type's required alignmnent as extra info for later: in + // aggregate. Store the type's required alignment as extra info for later: in // the [N x i64] case all trace has been removed by the time we actually get // to do allocation. PendingMembers.push_back(CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo, diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -14715,7 +14715,7 @@ if (!VT.isSimple()) return false; - // The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus + // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); auto Ty = VT.getSimpleVT().SimpleTy; diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -6703,7 +6703,7 @@ // omitted. We don't have a way to do that in tablegen, so fix it up here. // // We have to be careful to not emit an invalid Rt2 here, because the rest of -// the assmebly parser could then generate confusing diagnostics refering to +// the assembly parser could then generate confusing diagnostics referring to // it. If we do find anything that prevents us from doing the transformation we // bail out, and let the assembly parser report an error on the instruction as // it is written. 
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h b/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.h @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file declares the unwind opcode assmebler for ARM exception handling +// This file declares the unwind opcode assembler for ARM exception handling // table. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements the unwind opcode assmebler for ARM exception handling +// This file implements the unwind opcode assembler for ARM exception handling // table. // //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td --- a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td +++ b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.td @@ -119,7 +119,7 @@ def P2 : Rp<2, "p2">, DwarfRegNum<[65]>; def P3 : Rp<3, "p3">, DwarfRegNum<[66]>; - // Fake register to represent USR.OVF bit. Artihmetic/saturating instruc- + // Fake register to represent USR.OVF bit. Arithmetic/saturating instruc- // tions modify this bit, and multiple such instructions are allowed in the // same packet. We need to ignore output dependencies on this bit, but not // on the entire USR. 
diff --git a/llvm/lib/Target/Mips/MipsInstrFPU.td b/llvm/lib/Target/Mips/MipsInstrFPU.td --- a/llvm/lib/Target/Mips/MipsInstrFPU.td +++ b/llvm/lib/Target/Mips/MipsInstrFPU.td @@ -628,7 +628,7 @@ INSN_MIPS5_32R2_NOT_32R6_64R6, FGR_64; } -/// Floating-point Aritmetic +/// Floating-point Arithmetic let AdditionalPredicates = [NotInMicroMips] in { def FADD_S : MMRel, ADDS_FT<"add.s", FGR32Opnd, II_ADD_S, 1, fadd>, ADDS_FM<0x00, 16>, ISA_MIPS1; diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp --- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -380,7 +380,7 @@ // This is eiher: // 1) A fixed frame index object which we know are aligned so // as long as we have a valid DForm/DSForm/DQForm (non XForm) we don't - // need to consider the alignement here. + // need to consider the alignment here. // 2) A not fixed object but in that case we now know that the min required // alignment is no more than 1 based on the previous check. if (InstrInfo->isXFormMemOp(Opcode)) diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -9621,7 +9621,7 @@ defm : AVX512_pmovx_patterns<"VPMOVZX", zext, zext_invec>; // Without BWI we can't do a trunc from v16i16 to v16i8. DAG combine can merge -// ext+trunc aggresively making it impossible to legalize the DAG to this +// ext+trunc aggressively making it impossible to legalize the DAG to this // pattern directly. 
let Predicates = [HasAVX512, NoBWI] in { def: Pat<(v16i8 (trunc (v16i16 VR256X:$src))), diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp --- a/llvm/lib/Transforms/IPO/Attributor.cpp +++ b/llvm/lib/Transforms/IPO/Attributor.cpp @@ -154,7 +154,7 @@ static cl::opt AnnotateDeclarationCallSites( "attributor-annotate-decl-cs", cl::Hidden, - cl::desc("Annoate call sites of function declarations."), cl::init(false)); + cl::desc("Annotate call sites of function declarations."), cl::init(false)); static cl::opt ManifestInternal( "attributor-manifest-internal", cl::Hidden, @@ -3544,7 +3544,7 @@ if (SI->getPointerOperand() == &AnchorVal) if (SI->getAlignment() < getAssumedAlign()) { STATS_DECLTRACK(AAAlign, Store, - "Number of times alignemnt added to a store"); + "Number of times alignment added to a store"); SI->setAlignment(Align(getAssumedAlign())); Changed = ChangeStatus::CHANGED; } @@ -3553,7 +3553,7 @@ if (LI->getAlignment() < getAssumedAlign()) { LI->setAlignment(Align(getAssumedAlign())); STATS_DECLTRACK(AAAlign, Load, - "Number of times alignemnt added to a load"); + "Number of times alignment added to a load"); Changed = ChangeStatus::CHANGED; } } diff --git a/llvm/lib/Transforms/IPO/PartialInlining.cpp b/llvm/lib/Transforms/IPO/PartialInlining.cpp --- a/llvm/lib/Transforms/IPO/PartialInlining.cpp +++ b/llvm/lib/Transforms/IPO/PartialInlining.cpp @@ -702,7 +702,7 @@ return OutliningInfo; } -// Check if there is PGO data or user annoated branch data: +// Check if there is PGO data or user annotated branch data: static bool hasProfileData(Function *F, FunctionOutliningInfo *OI) { if (F->hasProfileData()) return true; diff --git a/llvm/lib/Transforms/Utils/AddDiscriminators.cpp b/llvm/lib/Transforms/Utils/AddDiscriminators.cpp --- a/llvm/lib/Transforms/Utils/AddDiscriminators.cpp +++ b/llvm/lib/Transforms/Utils/AddDiscriminators.cpp @@ -233,7 +233,7 @@ LocationSet CallLocations; for (auto &I : B.getInstList()) { // We bypass 
intrinsic calls for the following two reasons: - // 1) We want to avoid a non-deterministic assigment of + // 1) We want to avoid a non-deterministic assignment of // discriminators. // 2) We want to minimize the number of base discriminators used. if (!isa(I) && (!isa(I) || isa(I))) diff --git a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll --- a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll +++ b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll @@ -1,5 +1,5 @@ ; RUN: llc -mcpu=generic -mtriple=i386-apple-darwin -tailcallopt -enable-misched=false < %s | FileCheck %s -; Check that lowered argumens do not overwrite the return address before it is moved. +; Check that lowered arguments do not overwrite the return address before it is moved. ; Bug 6225 ; ; If a call is a fastcc tail call and tail call optimization is enabled, the diff --git a/llvm/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll b/llvm/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll --- a/llvm/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll +++ b/llvm/test/CodeGen/X86/DynamicCalleeSavedRegisters.ll @@ -7,7 +7,7 @@ ; One might think that the caller could assume that ESI value is the same before ; and after calling the callee. ; However, RegCall also says that a register that was used for -; passing/returning argumnets, can be assumed to be modified by the callee. +; passing/returning arguments, can be assumed to be modified by the callee. ; In other words, it is no longer a callee saved register. ; In this case we want to see that EDX/ECX values are saved and EDI/ESI are assumed ; to be modified by the callee. diff --git a/llvm/test/CodeGen/X86/lea.ll b/llvm/test/CodeGen/X86/lea.ll --- a/llvm/test/CodeGen/X86/lea.ll +++ b/llvm/test/CodeGen/X86/lea.ll @@ -23,7 +23,7 @@ ; ISel the add of -4 with a neg and use an lea for the rest of the -; arithemtic. +; arithmetic. 
define i32 @test2(i32 %x_offs) nounwind readnone { ; LINUX-LABEL: test2: ; LINUX: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -458,7 +458,7 @@ %struct.RT = type { i8, [10 x [20 x i32]], i8 } %struct.ST = type { i32, double, %struct.RT } -; Masked gather for agregate types +; Masked gather for aggregate types ; Test9 and Test10 should give the same result (scalar and vector indices in GEP) diff --git a/llvm/test/CodeGen/X86/swifterror.ll b/llvm/test/CodeGen/X86/swifterror.ll --- a/llvm/test/CodeGen/X86/swifterror.ll +++ b/llvm/test/CodeGen/X86/swifterror.ll @@ -533,7 +533,7 @@ } ; CHECK-APPLE-LABEL: params_in_reg -; Save callee save registers to store clobbered arugments. +; Save callee save registers to store clobbered arguments. ; CHECK-APPLE: pushq %rbp ; CHECK-APPLE: pushq %r15 ; CHECK-APPLE: pushq %r14 diff --git a/llvm/test/LTO/X86/parallel.ll b/llvm/test/LTO/X86/parallel.ll --- a/llvm/test/LTO/X86/parallel.ll +++ b/llvm/test/LTO/X86/parallel.ll @@ -3,7 +3,7 @@ ; RUN: llvm-nm %t.o.0 | FileCheck --check-prefix=CHECK0 %s ; RUN: llvm-nm %t.o.1 | FileCheck --check-prefix=CHECK1 %s -; FIXME: Investigate test failures on these architecures. +; FIXME: Investigate test failures on these architectures. ; UNSUPPORTED: mips, mipsel, aarch64, powerpc64 target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/MC/AArch64/arm64-directive_loh.s b/llvm/test/MC/AArch64/arm64-directive_loh.s --- a/llvm/test/MC/AArch64/arm64-directive_loh.s +++ b/llvm/test/MC/AArch64/arm64-directive_loh.s @@ -84,13 +84,13 @@ # CHECK-ERRORS-NEXT: ^ .loh 1 L1, L2, L3 -# Too few argumets. +# Too few arguments. # CHECK-ERRORS: error: unexpected token in '.loh' directive # CHECK-ERRORS-NEXT: .loh AdrpAdrp L1 # CHECK-ERRORS-NEXT: ^ .loh AdrpAdrp L1 -# Too few argumets with alternative syntax. 
+# Too few arguments with alternative syntax. # CHECK-ERRORS: error: unexpected token in '.loh' directive # CHECK-ERRORS-NEXT: .loh 1 L1 # CHECK-ERRORS-NEXT: ^ diff --git a/llvm/test/MC/ARM/misaligned-blx.s b/llvm/test/MC/ARM/misaligned-blx.s --- a/llvm/test/MC/ARM/misaligned-blx.s +++ b/llvm/test/MC/ARM/misaligned-blx.s @@ -6,7 +6,7 @@ _f1: bx lr - @ A misalgined ARM destination. + @ A misaligned ARM destination. .arm .globl _misaligned _misaligned: diff --git a/llvm/test/Transforms/DeadArgElim/naked_functions.ll b/llvm/test/Transforms/DeadArgElim/naked_functions.ll --- a/llvm/test/Transforms/DeadArgElim/naked_functions.ll +++ b/llvm/test/Transforms/DeadArgElim/naked_functions.ll @@ -1,6 +1,6 @@ ; RUN: opt -S -deadargelim %s | FileCheck %s -; Don't eliminate dead arugments from naked functions. +; Don't eliminate dead arguments from naked functions. ; CHECK: define internal i32 @naked(i32 %x) define internal i32 @naked(i32 %x) #0 { diff --git a/llvm/test/Transforms/SCCP/apint-basictest3.ll b/llvm/test/Transforms/SCCP/apint-basictest3.ll --- a/llvm/test/Transforms/SCCP/apint-basictest3.ll +++ b/llvm/test/Transforms/SCCP/apint-basictest3.ll @@ -1,5 +1,5 @@ ; This is a basic sanity check for constant propagation. It tests the basic -; arithmatic operations. +; arithmetic operations. ; RUN: opt < %s -sccp -S | not grep mul diff --git a/llvm/test/Transforms/SLPVectorizer/X86/align.ll b/llvm/test/Transforms/SLPVectorizer/X86/align.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/align.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/align.ll @@ -39,7 +39,7 @@ ret void } -; Float has 4 byte abi alignment on x86_64. We must use the alignmnet of the +; Float has 4 byte abi alignment on x86_64. We must use the alignment of the ; value being loaded/stored not the alignment of the pointer type. 
define void @test2(float * %a, float * %b) { diff --git a/llvm/tools/llvm-c-test/echo.cpp b/llvm/tools/llvm-c-test/echo.cpp --- a/llvm/tools/llvm-c-test/echo.cpp +++ b/llvm/tools/llvm-c-test/echo.cpp @@ -29,8 +29,8 @@ template struct CAPIDenseMap {}; -// The default DenseMapInfo require to know about pointer alignement. -// Because the C API uses opaques pointer types, their alignement is unknown. +// The default DenseMapInfo requires to know about pointer alignment. +// Because the C API uses opaque pointer types, their alignment is unknown. // As a result, we need to roll out our own implementation. template struct CAPIDenseMap { diff --git a/llvm/tools/llvm-objdump/MachODump.cpp b/llvm/tools/llvm-objdump/MachODump.cpp --- a/llvm/tools/llvm-objdump/MachODump.cpp +++ b/llvm/tools/llvm-objdump/MachODump.cpp @@ -7730,7 +7730,7 @@ } } // The TripleName's need to be reset if we are called again for a different - // archtecture. + // architecture. TripleName = ""; ThumbTripleName = ""; diff --git a/llvm/unittests/Analysis/VectorFunctionABITest.cpp b/llvm/unittests/Analysis/VectorFunctionABITest.cpp --- a/llvm/unittests/Analysis/VectorFunctionABITest.cpp +++ b/llvm/unittests/Analysis/VectorFunctionABITest.cpp @@ -256,7 +256,7 @@ EXPECT_EQ(Parameters.size(), (unsigned)1); EXPECT_EQ(Parameters[0].Alignment, Align(2)); - // Missing alignement value. + // Missing alignment value. EXPECT_FALSE(invokeParser("_ZGVsM2l2a_sin")); // Invalid alignment token "x". EXPECT_FALSE(invokeParser("_ZGVsM2l2ax_sin")); diff --git a/llvm/unittests/Analysis/VectorUtilsTest.cpp b/llvm/unittests/Analysis/VectorUtilsTest.cpp --- a/llvm/unittests/Analysis/VectorUtilsTest.cpp +++ b/llvm/unittests/Analysis/VectorUtilsTest.cpp @@ -433,7 +433,7 @@ TEST_F(VFShapeAPITest, Parameters_Invalid) { #ifndef NDEBUG - // Wrong order is checked by an asseretion: make sure that the + // Wrong order is checked by an assertion: make sure that the // assertion is not removed. 
EXPECT_DEATH(validParams({{1, VFParamKind::Vector}}), "Broken parameter list."); diff --git a/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp b/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp --- a/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp +++ b/llvm/unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp @@ -1,4 +1,4 @@ -//=== WebAssemblyExceptionInfoTest.cpp - WebAssebmlyExceptionInfo unit tests =// +//=== WebAssemblyExceptionInfoTest.cpp - WebAssemblyExceptionInfo unit tests =// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/utils/TableGen/CodeGenTarget.h b/llvm/utils/TableGen/CodeGenTarget.h --- a/llvm/utils/TableGen/CodeGenTarget.h +++ b/llvm/utils/TableGen/CodeGenTarget.h @@ -86,12 +86,12 @@ /// Record *getAsmParser() const; - /// getAsmParserVariant - Return the AssmblyParserVariant definition for + /// getAsmParserVariant - Return the AssemblyParserVariant definition for /// this target. /// Record *getAsmParserVariant(unsigned i) const; - /// getAsmParserVariantCount - Return the AssmblyParserVariant definition + /// getAsmParserVariantCount - Return the AssemblyParserVariant definition /// available for this target. /// unsigned getAsmParserVariantCount() const; diff --git a/llvm/utils/TableGen/CodeGenTarget.cpp b/llvm/utils/TableGen/CodeGenTarget.cpp --- a/llvm/utils/TableGen/CodeGenTarget.cpp +++ b/llvm/utils/TableGen/CodeGenTarget.cpp @@ -260,7 +260,7 @@ return LI[AsmParserNum]; } -/// getAsmParserVariant - Return the AssmblyParserVariant definition for +/// getAsmParserVariant - Return the AssemblyParserVariant definition for /// this target. 
/// Record *CodeGenTarget::getAsmParserVariant(unsigned i) const { @@ -272,7 +272,7 @@ return LI[i]; } -/// getAsmParserVariantCount - Return the AssmblyParserVariant definition +/// getAsmParserVariantCount - Return the AssemblyParserVariant definition /// available for this target. /// unsigned CodeGenTarget::getAsmParserVariantCount() const { diff --git a/llvm/utils/bugpoint/RemoteRunSafely.sh b/llvm/utils/bugpoint/RemoteRunSafely.sh --- a/llvm/utils/bugpoint/RemoteRunSafely.sh +++ b/llvm/utils/bugpoint/RemoteRunSafely.sh @@ -33,7 +33,7 @@ # $2 - number of arguments to shift if [ $1 -lt $2 ] then - echo "Error: Wrong number of argumants." + echo "Error: Wrong number of arguments." printUsageAndExit fi }