diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -415,7 +415,7 @@
   "Support DPP (Data Parallel Primitives) extension"
 >;
 
-// DPP8 allows arbitrary cross-lane swizzling withing groups of 8 lanes.
+// DPP8 allows arbitrary cross-lane swizzling within groups of 8 lanes.
 def FeatureDPP8 : SubtargetFeature<"dpp8",
   "HasDPP8",
   "true",
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -76,8 +76,8 @@
 };
 
 const ImageDimIntrinsicInfo *getImageDimIntrinsicInfo(unsigned Intr);
-const ImageDimIntrinsicInfo *getImageDimInstrinsicByBaseOpcode(unsigned BaseOpcode,
-                                                               unsigned Dim);
+const ImageDimIntrinsicInfo *
+getImageDimIntrinsicByBaseOpcode(unsigned BaseOpcode, unsigned Dim);
 
 } // end AMDGPU namespace
 } // End llvm namespace
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -109,8 +109,8 @@
                                      Register Den) const;
 
   void legalizeUnsignedDIV_REM64Impl(MachineIRBuilder &B, Register DstDivReg,
-                                     Register DstRemReg, Register Numer,
-                                     Register Denom) const;
+                                     Register DstRemReg, Register Num,
+                                     Register Den) const;
 
   bool legalizeSignedDIV_REM(MachineInstr &MI, MachineRegisterInfo &MRI,
                              MachineIRBuilder &B) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -4408,8 +4408,8 @@
     if (ConstantLod->isZero() || ConstantLod->isNegative()) {
       // Set new opcode to _lz variant of _l, and change the intrinsic ID.
       const AMDGPU::ImageDimIntrinsicInfo *NewImageDimIntr =
-          AMDGPU::getImageDimInstrinsicByBaseOpcode(LZMappingInfo->LZ,
-                                                    Intr->Dim);
+          AMDGPU::getImageDimIntrinsicByBaseOpcode(LZMappingInfo->LZ,
+                                                   Intr->Dim);
 
       // The starting indexes should remain in the same place.
       --CorrectedNumVAddrs;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -125,7 +125,7 @@
   BasicBlock::iterator getEntryIns(CallInst * UI);
   // Insert an Alloc instruction.
   AllocaInst* insertAlloca(CallInst * UI, IRBuilder<> &B, const char *prefix);
-  // Get a scalar native builtin signle argument FP function
+  // Get a scalar native builtin single argument FP function
   FunctionCallee getNativeFunction(Module *M, const FuncInfo &FInfo);
 
 protected:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibFunc.cpp
@@ -455,7 +455,8 @@
       break;
     }
-    default: llvm_unreachable("Unhandeled param rule");
+    default:
+      llvm_unreachable("Unhandled param rule");
     }
   }
   ++Index;
@@ -747,7 +748,8 @@
   case AMDGPULibFunc::IMG3D: return "11ocl_image3d";
   case AMDGPULibFunc::SAMPLER: return "11ocl_sampler";
   case AMDGPULibFunc::EVENT: return "9ocl_event";
-  default: llvm_unreachable("Unhandeled param type");
+  default:
+    llvm_unreachable("Unhandled param type");
   }
   return nullptr;
 }
@@ -761,7 +763,7 @@
 // substitution candidates from the grammar, but are explicitly excluded:
 // 1. other than vendor extended types ..."
-// For the purpose of functions the following productions make sence for the
+// For the purpose of functions the following productions make sense for the
 // substitution:
 //  ::=
 //  ::=
@@ -774,8 +776,8 @@
 // using production rule they're not used for substitution
 // because clang consider them as builtin types.
 //
-// DvNN_ type is GCC extension for vectors and is a subject for the substitution.
-
+// DvNN_ type is GCC extension for vectors and is a subject for the
+// substitution.
 
 class ItaniumMangler {
   SmallVector Str; // list of accumulated substitutions
@@ -902,7 +904,7 @@
   case AMDGPULibFunc::EVENT:
     T = StructType::create(C,"ocl_event")->getPointerTo(); break;
   default:
-    llvm_unreachable("Unhandeled param type");
+    llvm_unreachable("Unhandled param type");
     return nullptr;
   }
   if (P.VectorSize > 1)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUOpenCLEnqueuedBlockLowering.cpp
@@ -13,7 +13,7 @@
 //
 // In LLVM CodeGen the runtime-handle metadata will be translated to
 // RuntimeHandle metadata in code object. Runtime allocates a global buffer
-// for each kernel with RuntimeHandel metadata and saves the kernel address
+// for each kernel with RuntimeHandle metadata and saves the kernel address
 // required for the AQL packet into the buffer. __enqueue_kernel function
 // in device library knows that the invoke function pointer in the block
 // literal is actually runtime handle and loads the kernel address from it
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -274,7 +274,7 @@
 
   // We could do a single 64-bit load here, but it's likely that the basic
   // 32-bit and extract sequence is already present, and it is probably easier
-  // to CSE this. The loads should be mergable later anyway.
+  // to CSE this. The loads should be mergeable later anyway.
   Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
   LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -251,7 +251,7 @@
   uint64_t getExplicitKernArgSize(const Function &F, Align &MaxAlign) const;
   unsigned getKernArgSegmentSize(const Function &F, Align &MaxAlign) const;
 
-  /// \returns Corresponsing DWARF register number mapping flavour for the
+  /// \returns Corresponding DWARF register number mapping flavour for the
   /// \p WavefrontSize.
   AMDGPUDwarfFlavour getAMDGPUDwarfFlavour() const;
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -1545,7 +1545,7 @@
 }
 
 int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) {
-  // On gfx90a+ releveant hazards are checked in checkMAIVALUHazards()
+  // On gfx90a+ relevant hazards are checked in checkMAIVALUHazards()
   if (!ST.hasMAIInsts() || ST.hasGFX90AInsts())
     return 0;
diff --git a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
--- a/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
@@ -188,7 +188,7 @@
               printRegion(dbgs(), Rgn.Begin, Rgn.End, Sch.LIS, 2));
   Sch.BaseClass::schedule();
 
-  // Unfortunatelly placeDebugValues incorrectly modifies RegionEnd, restore
+  // Unfortunately placeDebugValues incorrectly modifies RegionEnd, restore
   Sch.RegionEnd = Rgn.End;
   //assert(Rgn.End == Sch.RegionEnd);
   Rgn.Begin = Sch.RegionBegin;
@@ -280,7 +280,7 @@
   return RPTracker.moveMaxPressure();
 }
 
-void GCNIterativeScheduler::enterRegion(MachineBasicBlock *BB, // overriden
+void GCNIterativeScheduler::enterRegion(MachineBasicBlock *BB, // overridden
                                         MachineBasicBlock::iterator Begin,
                                         MachineBasicBlock::iterator End,
                                         unsigned NumRegionInstrs) {
@@ -293,7 +293,7 @@
   }
 }
 
-void GCNIterativeScheduler::schedule() { // overriden
+void GCNIterativeScheduler::schedule() { // overridden
   // do nothing
   LLVM_DEBUG(printLivenessInfo(dbgs(), RegionBegin, RegionEnd, LIS);
     if (!Regions.empty() && Regions.back()->Begin == RegionBegin) {
@@ -304,7 +304,7 @@
              << '\n';);
 }
 
-void GCNIterativeScheduler::finalizeSchedule() { // overriden
+void GCNIterativeScheduler::finalizeSchedule() { // overridden
   if (Regions.empty())
     return;
   switch (Strategy) {
@@ -391,8 +391,8 @@
   // and already interleaved with debug values
   if (!std::is_same::value) {
     placeDebugValues();
-    // Unfortunatelly placeDebugValues incorrectly modifies RegionEnd, restore
-    //assert(R.End == RegionEnd);
+    // Unfortunately placeDebugValues incorrectly modifies RegionEnd, restore
+    // assert(R.End == RegionEnd);
     RegionEnd = R.End;
   }
diff --git a/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
--- a/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 ///
 /// \file
-/// This file defines and imlements the class GCNMinRegScheduler, which
+/// This file defines and implements the class GCNMinRegScheduler, which
 /// implements an experimental, simple scheduler whose main goal is to learn
 /// ways about consuming less possible registers for a region.
 ///
diff --git a/llvm/lib/Target/AMDGPU/GCNPreRAOptimizations.cpp b/llvm/lib/Target/AMDGPU/GCNPreRAOptimizations.cpp
--- a/llvm/lib/Target/AMDGPU/GCNPreRAOptimizations.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNPreRAOptimizations.cpp
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 /// \file
-/// This pass combines split register tuple initialization into a single psuedo:
+/// This pass combines split register tuple initialization into a single pseudo:
 ///
 /// undef %0.sub1:sreg_64 = S_MOV_B32 1
 /// %0.sub0:sreg_64 = S_MOV_B32 2
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -121,7 +121,7 @@
 
   // Register pressure is considered 'CRITICAL' if it is approaching a value
   // that would reduce the wave occupancy for the execution unit. When
-  // register pressure is 'CRITICAL', increading SGPR and VGPR pressure both
+  // register pressure is 'CRITICAL', increasing SGPR and VGPR pressure both
   // has the same cost, so we don't need to prefer one over the other.
 
   int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
diff --git a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h
--- a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h
+++ b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h
@@ -60,7 +60,7 @@
   /// where we setup the InstrWaitCntInfo vector.
   /// The core logic for determining which CNTs an instruction
   /// interacts with is taken from SIInsertWaitcnts::updateEventWaitcntAfter().
-  /// Unfortunately, some of the logic from that function is not avalable to us
+  /// Unfortunately, some of the logic from that function is not available to us
   /// in this scope so we conservatively end up assuming that some
   /// instructions interact with more CNTs than they do in reality.
   void generateWaitCntInfo();
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -1108,7 +1108,7 @@
   let PrimaryKeyEarlyOut = 1;
 }
 
-def getImageDimInstrinsicByBaseOpcode : SearchIndex {
+def getImageDimIntrinsicByBaseOpcode : SearchIndex {
   let Table = ImageDimIntrinsicTable;
   let Key = ["BaseOpcode", "Dim"];
 }
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -926,7 +926,7 @@
       std::swap(LHS, RHS);
       CC = DAG.getCondCode(CCSwapped);
     } else {
-      // Try inverting the conditon and then swapping the operands
+      // Try inverting the condition and then swapping the operands
      ISD::CondCode CCInv = ISD::getSetCCInverse(CCOpcode, CompareVT);
       CCSwapped = ISD::getSetCCSwappedOperands(CCInv);
       if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.h b/llvm/lib/Target/AMDGPU/R600InstrInfo.h
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.h
@@ -175,7 +175,7 @@
                         int *BytesAdded = nullptr) const override;
 
   unsigned removeBranch(MachineBasicBlock &MBB,
-                        int *BytesRemvoed = nullptr) const override;
+                        int *BytesRemoved = nullptr) const override;
 
   bool isPredicated(const MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/AMDGPU/R600Instructions.td b/llvm/lib/Target/AMDGPU/R600Instructions.td
--- a/llvm/lib/Target/AMDGPU/R600Instructions.td
+++ b/llvm/lib/Target/AMDGPU/R600Instructions.td
@@ -1346,7 +1346,7 @@
 
 //===----------------------------------------------------------------------===//
-// Regist loads and stores - for indirect addressing
+// Register loads and stores - for indirect addressing
 //===----------------------------------------------------------------------===//
 
 let Namespace = "R600" in {
diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.h b/llvm/lib/Target/AMDGPU/R600MachineScheduler.h
--- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.h
+++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.h
@@ -63,7 +63,7 @@
 
   int InstKindLimit[IDLast];
 
-  int OccupedSlotsMask;
+  int OccupiedSlotsMask;
 
 public:
   R600SchedStrategy() = default;
diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
--- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -29,7 +29,7 @@
   MRI = &DAG->MRI;
   CurInstKind = IDOther;
   CurEmitted = 0;
-  OccupedSlotsMask = 31;
+  OccupiedSlotsMask = 31;
   InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
   InstKindLimit[IDOther] = 32;
   InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
@@ -138,7 +138,7 @@
   if (NextInstKind != CurInstKind) {
     LLVM_DEBUG(dbgs() << "Instruction Type Switch\n");
     if (NextInstKind != IDAlu)
-      OccupedSlotsMask |= 31;
+      OccupiedSlotsMask |= 31;
     CurEmitted = 0;
     CurInstKind = NextInstKind;
   }
@@ -339,10 +339,10 @@
 
 void R600SchedStrategy::PrepareNextSlot() {
   LLVM_DEBUG(dbgs() << "New Slot\n");
-  assert (OccupedSlotsMask && "Slot wasn't filled");
-  OccupedSlotsMask = 0;
-//  if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
-//    OccupedSlotsMask |= 16;
+  assert(OccupiedSlotsMask && "Slot wasn't filled");
+  OccupiedSlotsMask = 0;
+  // if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
+  //   OccupiedSlotsMask |= 16;
   InstructionsGroupCandidate.clear();
   LoadAlu();
 }
@@ -400,41 +400,41 @@
 
 SUnit* R600SchedStrategy::pickAlu() {
   while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
-    if (!OccupedSlotsMask) {
+    if (!OccupiedSlotsMask) {
       // Bottom up scheduling : predX must comes first
       if (!AvailableAlus[AluPredX].empty()) {
-        OccupedSlotsMask |= 31;
+        OccupiedSlotsMask |= 31;
         return PopInst(AvailableAlus[AluPredX], false);
       }
       // Flush physical reg copies (RA will discard them)
       if (!AvailableAlus[AluDiscarded].empty()) {
-        OccupedSlotsMask |= 31;
+        OccupiedSlotsMask |= 31;
         return PopInst(AvailableAlus[AluDiscarded], false);
       }
       // If there is a T_XYZW alu available, use it
       if (!AvailableAlus[AluT_XYZW].empty()) {
-        OccupedSlotsMask |= 15;
+        OccupiedSlotsMask |= 15;
         return PopInst(AvailableAlus[AluT_XYZW], false);
       }
     }
-    bool TransSlotOccuped = OccupedSlotsMask & 16;
-    if (!TransSlotOccuped && VLIW5) {
+    bool TransSlotOccupied = OccupiedSlotsMask & 16;
+    if (!TransSlotOccupied && VLIW5) {
       if (!AvailableAlus[AluTrans].empty()) {
-        OccupedSlotsMask |= 16;
+        OccupiedSlotsMask |= 16;
         return PopInst(AvailableAlus[AluTrans], false);
       }
       SUnit *SU = AttemptFillSlot(3, true);
       if (SU) {
-        OccupedSlotsMask |= 16;
+        OccupiedSlotsMask |= 16;
         return SU;
       }
     }
     for (int Chan = 3; Chan > -1; --Chan) {
-      bool isOccupied = OccupedSlotsMask & (1 << Chan);
+      bool isOccupied = OccupiedSlotsMask & (1 << Chan);
       if (!isOccupied) {
         SUnit *SU = AttemptFillSlot(Chan, false);
         if (SU) {
-          OccupedSlotsMask |= (1 << Chan);
+          OccupiedSlotsMask |= (1 << Chan);
           InstructionsGroupCandidate.push_back(SU->getInstr());
           return SU;
         }
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -228,7 +228,7 @@
   MachineOperand &Mod = MI->getOperand(ModIdx);
   unsigned Val = Mod.getImm();
   if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
-    // Only apply the following transformation if that operand requries
+    // Only apply the following transformation if that operand requires
     // a packed immediate.
     switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
     case AMDGPU::OPERAND_REG_IMM_V2FP16:
@@ -688,7 +688,7 @@
 
   // Don't fold into a copy to a physical register with the same class. Doing
   // so would interfere with the register coalescer's logic which would avoid
-  // redundant initalizations.
+  // redundant initializations.
   if (DestReg.isPhysical() && SrcRC->contains(DestReg))
     return;
@@ -902,7 +902,7 @@
       tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
 
       // FIXME: We could try to change the instruction from 64-bit to 32-bit
-      // to enable more folding opportunites. The shrink operands pass
+      // to enable more folding opportunities. The shrink operands pass
       // already does this.
       return;
     }
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -73,7 +73,7 @@
 
 // Class of object that encapsulates latest instruction counter score
 // associated with the operand. Used for determining whether
-// s_waitcnt instruction needs to be emited.
+// s_waitcnt instruction needs to be emitted.
 
 #define CNT_MASK(t) (1u << (t))
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -146,7 +146,7 @@
     if (!AddrOp->isReg())
       return false;
 
-    // TODO: We should be able to merge physical reg addreses.
+    // TODO: We should be able to merge physical reg addresses.
     if (AddrOp->getReg().isPhysical())
       return false;
@@ -652,7 +652,7 @@
 }
 
 // This function assumes that \p A and \p B have are identical except for
-// size and offset, and they referecne adjacent memory.
+// size and offset, and they reference adjacent memory.
 static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
                                                    const MachineMemOperand *A,
                                                    const MachineMemOperand *B) {
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -13,7 +13,7 @@
 /// All control flow is handled using predicated instructions and
 /// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
 /// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
-/// by writting to the 64-bit EXEC register (each bit corresponds to a
+/// by writing to the 64-bit EXEC register (each bit corresponds to a
 /// single vector ALU). Typically, for predicates, a vector ALU will write
 /// to its bit of the VCC register (like EXEC VCC is 64-bits, one for each
 /// Vector ALU) and then the ScalarALU will AND the VCC register with the
@@ -38,7 +38,8 @@
 /// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0   // Do the IF block of the branch
 ///
 /// label0:
-/// %sgpr0 = S_OR_SAVEEXEC_B64 %sgpr0   // Restore the exec mask for the Then block
+/// %sgpr0 = S_OR_SAVEEXEC_B64 %sgpr0   // Restore the exec mask for the Then
+///                                     // block
 /// %exec = S_XOR_B64 %sgpr0, %exec     // Update the exec mask
 /// S_BRANCH_EXECZ label1               // Use our branch optimization
 ///                                     // instruction again.
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -368,7 +368,7 @@
   }
 
   // All those frame indices which are dead by now should be removed from the
-  // function frame. Othewise, there is a side effect such as re-mapping of
+  // function frame. Otherwise, there is a side effect such as re-mapping of
   // free frame index ids by the later pass(es) like "stack slot coloring"
   // which in turn could mess-up with the book keeping of "frame index to VGPR
   // lane".
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.h b/llvm/lib/Target/AMDGPU/SIMachineScheduler.h
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.h
@@ -72,7 +72,7 @@
   // store the live virtual and real registers.
   // We do care only of SGPR32 and VGPR32 and do track only virtual registers.
   // Pressure of additional registers required inside the block.
-  std::vector InternalAdditionnalPressure;
+  std::vector InternalAdditionalPressure;
   // Pressure of input and output registers
   std::vector LiveInPressure;
   std::vector LiveOutPressure;
@@ -153,8 +153,8 @@
   // Needs the block to be scheduled inside
   // TODO: find a way to compute it.
-  std::vector &getInternalAdditionnalRegUsage() {
-    return InternalAdditionnalPressure;
+  std::vector &getInternalAdditionalRegUsage() {
+    return InternalAdditionalPressure;
   }
 
   std::set &getInRegs() { return LiveInRegs; }
diff --git a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
--- a/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineScheduler.cpp
@@ -403,7 +403,7 @@
   }
 
   // TODO: compute InternalAdditionnalPressure.
-  InternalAdditionnalPressure.resize(TopPressure.MaxSetPressure.size());
+  InternalAdditionalPressure.resize(TopPressure.MaxSetPressure.size());
 
   // Check everything is right.
 #ifndef NDEBUG
diff --git a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
--- a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
+++ b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
@@ -225,7 +225,7 @@
   // RequirePending is used to indicate whether we are collecting the initial
   // requirements for the block, and need to defer the first InsertionPoint to
   // Phase 3. It is set to false once we have set FirstInsertionPoint, or when
-  // we discover an explict setreg that means this block doesn't have any
+  // we discover an explicit setreg that means this block doesn't have any
   // initial requirements.
   bool RequirePending = true;
   Status IPChange;
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp
--- a/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeVGPRLiveRange.cpp
@@ -11,7 +11,7 @@
 /// structures and waterfall loops.
 ///
 /// When we do structurization, we usually transform an if-else into two
-/// sucessive if-then (with a flow block to do predicate inversion). Consider a
+/// successive if-then (with a flow block to do predicate inversion). Consider a
 /// simple case after structurization: A divergent value %a was defined before
 /// if-else and used in both THEN (use in THEN is optional) and ELSE part:
 /// bb.if:
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -365,7 +365,7 @@
     if (Dst &&
         DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
-      // This will work if the tied src is acessing WORD_0, and the dst is
+      // This will work if the tied src is accessing WORD_0, and the dst is
       // writing WORD_1. Modifiers don't matter because all the bits that
       // would be impacted are being overwritten by the dst.
       // Any other case will not work.
diff --git a/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp b/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp
--- a/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp
@@ -8,7 +8,7 @@
 //
 /// \file
 /// This pass creates bundles of memory instructions to protect adjacent loads
-/// and stores from beeing rescheduled apart from each other post-RA.
+/// and stores from being rescheduled apart from each other post-RA.
 ///
 //===----------------------------------------------------------------------===//
 
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -174,7 +174,7 @@
       MI.setDesc(TII->get(AMDGPU::S_BRANCH));
     } else if (IsVCCZ && MaskValue == 0) {
       // Will always branch
-      // Remove all succesors shadowed by new unconditional branch
+      // Remove all successors shadowed by new unconditional branch
       MachineBasicBlock *Parent = MI.getParent();
       SmallVector ToRemove;
       bool Found = false;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -834,7 +834,7 @@
 
 // This is not a real register. This is just to have a register to add
 // to VReg_1 that does not alias any real register that would
-// introduce inferred register classess.
+// introduce inferred register classes.
 def ARTIFICIAL_VGPR : SIReg <"invalid vgpr", 0> {
   let isArtificial = 1;
 }
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -188,7 +188,7 @@
     return;
 
   // eq/ne is special because the imm16 can be treated as signed or unsigned,
-  // and initially selectd to the unsigned versions.
+  // and initially selected to the unsigned versions.
   if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
     bool HasUImm;
     if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -771,7 +771,7 @@
 /// Is this floating-point operand?
 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo);
 
-/// Does this opearnd support only inlinable literals?
+/// Does this operand support only inlinable literals?
 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo);
 
 /// Get the size in bits of a register from the register class \p RC.
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h
@@ -48,7 +48,7 @@
 /// as an use within some instruction (either from kernel or from non-kernel).
 bool hasUserInstruction(const GlobalValue *GV);
 
-/// \returns true if an LDS global requres lowering to a module LDS structure
+/// \returns true if an LDS global requires lowering to a module LDS structure
 /// if \p F is not given. If \p F is given it must be a kernel and function
 /// \returns true if an LDS global is directly used from that kernel and it
 /// is safe to replace its uses with a kernel LDS structure member.