diff --git a/mlir/docs/Bindings/Python.md b/mlir/docs/Bindings/Python.md
--- a/mlir/docs/Bindings/Python.md
+++ b/mlir/docs/Bindings/Python.md
@@ -520,7 +520,7 @@
 access and printing. The latter provide access to the defining block or
 operation and the position of the value within it. By default, the generic
 `Value` objects are returned from IR traversals. Downcasting is implemented
-through concrete subclass constructors, similarly to attribtues and types:
+through concrete subclass constructors, similarly to attributes and types:
 
 ```python
 from mlir.ir import BlockArgument, OpResult, Value
diff --git a/mlir/docs/TargetLLVMIR.md b/mlir/docs/TargetLLVMIR.md
--- a/mlir/docs/TargetLLVMIR.md
+++ b/mlir/docs/TargetLLVMIR.md
@@ -553,7 +553,7 @@
 The "bare pointer" calling convention does not support unranked memrefs as
 their shape cannot be known at compile time.
 
-### Generic alloction and deallocation functions
+### Generic allocation and deallocation functions
 
 When converting the Memref dialect, allocations and deallocations are converted
 into calls to `malloc` (`aligned_alloc` if aligned allocations are requested)
diff --git a/mlir/include/mlir/Analysis/DataFlowFramework.h b/mlir/include/mlir/Analysis/DataFlowFramework.h
--- a/mlir/include/mlir/Analysis/DataFlowFramework.h
+++ b/mlir/include/mlir/Analysis/DataFlowFramework.h
@@ -379,7 +379,7 @@
   /// dependents are placed on the worklist.
   ///
   /// The dependency graph does not need to be static. Each invocation of
-  /// `visit` can add new dependencies, but these dependecies will not be
+  /// `visit` can add new dependencies, but these dependencies will not be
   /// dynamically added to the worklist because the solver doesn't know what
   /// will provide a value for then.
   virtual LogicalResult visit(ProgramPoint point) = 0;
diff --git a/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h b/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h
--- a/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h
+++ b/mlir/include/mlir/Analysis/Presburger/PWMAFunction.h
@@ -74,7 +74,7 @@
   /// Remove the specified range of vars.
   void removeVarRange(VarKind kind, unsigned varStart, unsigned varLimit);
 
-  /// Given a MAF `other`, merges local variables such that both funcitons
+  /// Given a MAF `other`, merges local variables such that both functions
   /// have union of local vars, without changing the set of points in domain or
   /// the output.
   void mergeLocalVars(MultiAffineFunction &other);
diff --git a/mlir/include/mlir/Analysis/Presburger/Simplex.h b/mlir/include/mlir/Analysis/Presburger/Simplex.h
--- a/mlir/include/mlir/Analysis/Presburger/Simplex.h
+++ b/mlir/include/mlir/Analysis/Presburger/Simplex.h
@@ -563,7 +563,7 @@
 /// negative for all values in the symbol domain, the row needs to be pivoted
 /// irrespective of the precise value of the symbols. To answer queries like
 /// "Is this symbolic sample always negative in the symbol domain?", we maintain
-/// a `LexSimplex domainSimplex` correponding to the symbol domain.
+/// a `LexSimplex domainSimplex` corresponding to the symbol domain.
 ///
 /// In other cases, it may be that the symbolic sample is violated at some
 /// values in the symbol domain and not violated at others. In this case,
diff --git a/mlir/include/mlir/Bindings/Python/Attributes.td b/mlir/include/mlir/Bindings/Python/Attributes.td
--- a/mlir/include/mlir/Bindings/Python/Attributes.td
+++ b/mlir/include/mlir/Bindings/Python/Attributes.td
@@ -21,7 +21,7 @@
   string pythonType = p;
 }
 
-// Mappings between supported builtin attribtues and Python types.
+// Mappings between supported builtin attributes and Python types.
 def : PythonAttr<"::mlir::Attribute", "_ods_ir.Attribute">;
 def : PythonAttr<"::mlir::BoolAttr", "_ods_ir.BoolAttr">;
 def : PythonAttr<"::mlir::IntegerAttr", "_ods_ir.IntegerAttr">;
diff --git a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td
--- a/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td
+++ b/mlir/include/mlir/Dialect/AMDGPU/AMDGPU.td
@@ -51,7 +51,7 @@
     The `amdgpu.raw_buffer_load` op is a wrapper around the buffer load
    intrinsics available on AMD GPUs, including extensions in newer GPUs.
 
-    The index into the buffer is computed as for `memref.load` with the additon
+    The index into the buffer is computed as for `memref.load` with the addition
     of `indexOffset` and `sgprOffset` (which **may or may not** be considered
     in bounds checks and includes any offset present on the memref type if
     it's non-zero).
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUBase.td
@@ -84,7 +84,7 @@
    GPU operations implementing this interface take a list of dependencies
    as `gpu.async.token` arguments and optionally return a `gpu.async.token`.
 
-    The op doesn't start executing until all depent ops producing the async
+    The op doesn't start executing until all dependent ops producing the async
    dependency tokens have finished executing.
 
    If the op returns a token, the op merely schedules the execution on the
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -882,7 +882,7 @@
    All the threads in the warp must execute the same `mma.sync` operation.
 
    For each possible multiplicand PTX data type, there are one or more possible
-    instruction shapes given as "mMnNkK". The below table describes the posssibilities
+    instruction shapes given as "mMnNkK". The below table describes the possibilities
    as well as the types required for the operands. Note that the data type for
    C (the accumulator) and D (the result) can vary independently when there are
    multiple possibilities in the "C/D Type" column.
@@ -892,7 +892,7 @@
    raised.
 
    `b1Op` is only relevant when the binary (b1) type is given to
-    `multiplicandDataType`. It specifies how the multiply-and-acumulate is
+    `multiplicandDataType`. It specifies how the multiply-and-accumulate is
    performed and is either `xor_popc` or `and_poc`. The default is `xor_popc`.
 
    `intOverflowBehavior` is only relevant when the `multiplicandType` attribute
diff --git a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
--- a/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
+++ b/mlir/include/mlir/Dialect/NVGPU/IR/NVGPU.td
@@ -12,7 +12,7 @@
 // dialects and lower level NVVM dialect. This allow representing PTX specific
 // operations while using MLIR high level concepts like memref and 2-D vector.
 //
-// Ops semantic are going to be based on vendor specific PTX defintion:
+// Ops semantic are going to be based on vendor specific PTX definition:
 // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html
 //
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h b/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h
--- a/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h
+++ b/mlir/include/mlir/Dialect/Vector/Transforms/VectorDistribution.h
@@ -20,7 +20,7 @@
  /// WarpExecuteOnLane0Op.
  /// The function needs to return an allocation that the lowering can use as
  /// temporary memory. The allocation needs to match the shape of the type (the
-  /// type may be VectorType or a scalar) and be availble for the current warp.
+  /// type may be VectorType or a scalar) and be available for the current warp.
  /// If there are several warps running in parallel the allocation needs to be
  /// split so that each warp has its own allocation.
  using WarpAllocationFn =
diff --git a/mlir/include/mlir/IR/BuiltinAttributes.td b/mlir/include/mlir/IR/BuiltinAttributes.td
--- a/mlir/include/mlir/IR/BuiltinAttributes.td
+++ b/mlir/include/mlir/IR/BuiltinAttributes.td
@@ -1107,7 +1107,7 @@
    static FlatSymbolRefAttr get(StringAttr value);
    static FlatSymbolRefAttr get(MLIRContext *ctx, StringRef value);
 
-    /// Convenience getter for buliding a SymbolRefAttr based on an operation
+    /// Convenience getter for building a SymbolRefAttr based on an operation
    /// that implements the SymbolTrait.
    static FlatSymbolRefAttr get(Operation *symbol);
diff --git a/mlir/include/mlir/Transforms/Passes.td b/mlir/include/mlir/Transforms/Passes.td
--- a/mlir/include/mlir/Transforms/Passes.td
+++ b/mlir/include/mlir/Transforms/Passes.td
@@ -48,7 +48,7 @@
    This is similar (but opposite) to loop-invariant code motion, which hoists
    operations out of regions executed more than once. The implementation of
-    control-flow sink uses a simple and conversative cost model: operations are
+    control-flow sink uses a simple and conservative cost model: operations are
    never duplicated and are only moved into singly-executed regions.
 
    It is recommended to run canonicalization first to remove unreachable
diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
@@ -340,7 +340,7 @@
    global = rewriter.create<LLVM::GlobalOp>(
        loc, globalType,
        /*isConstant=*/true, LLVM::Linkage::Internal, stringConstName,
-        rewriter.getStringAttr(formatString), /*allignment=*/0, addressSpace);
+        rewriter.getStringAttr(formatString), /*alignment=*/0, addressSpace);
  }
 
  // Get a pointer to the format string's first element
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Detensorize.cpp
@@ -55,7 +55,7 @@
  });
 }
 
-/// A conversion patttern for detensoring `linalg.generic` ops.
+/// A conversion pattern for detensoring `linalg.generic` ops.
 class DetensorizeGenericOp : public OpConversionPattern<GenericOp> {
 public:
  using OpConversionPattern<GenericOp>::OpConversionPattern;
@@ -411,7 +411,7 @@
      Block *block = blockArg.getParentBlock();
 
      // For the potentially detensorable block argument, find the
-      // correpsonding operands in predecessor blocks.
+      // corresponding operands in predecessor blocks.
      for (PredecessorIterator pred = block->pred_begin();
           pred != block->pred_end(); ++pred) {
        BranchOpInterface terminator =
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -673,13 +673,13 @@
  // • x == 0 -> -INF
  // • x < 0 -> NAN
  // • x == +INF -> +INF
-  Value aproximation = builder.create<arith::SelectOp>(
+  Value approximation = builder.create<arith::SelectOp>(
      zeroMask, cstMinusInf,
      builder.create<arith::SelectOp>(
          invalidMask, cstNan,
          builder.create<arith::SelectOp>(posInfMask, cstPosInf, x)));
 
-  rewriter.replaceOp(op, aproximation);
+  rewriter.replaceOp(op, approximation);
 
  return success();
 }
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
@@ -284,7 +284,7 @@
  propagateShapesInRegion(func.getBody());
 
-  // Insert UnrealizedConversionCasts to guarantee ReturnOp agress with
+  // Insert UnrealizedConversionCasts to guarantee ReturnOp agrees with
  // the FuncOp type.
  func.walk([&](func::ReturnOp op) {
    func::FuncOp parent = dyn_cast<func::FuncOp>(op->getParentOp());
diff --git a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
--- a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
@@ -286,7 +286,7 @@
  } else {
    // Update group pending tokens when token will become ready. Because this
    // will happen asynchronously we must ensure that `group` is alive until
-    // then, and re-ackquire the lock.
+    // then, and re-acquire the lock.
    group->addRef();
 
    token->awaiters.emplace_back([group, onTokenReady]() {
diff --git a/mlir/lib/Target/Cpp/TranslateToCpp.cpp b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
--- a/mlir/lib/Target/Cpp/TranslateToCpp.cpp
+++ b/mlir/lib/Target/Cpp/TranslateToCpp.cpp
@@ -108,7 +108,7 @@
  /// Emits a label for the block.
  LogicalResult emitLabel(Block &block);
 
-  /// Emits the operands and atttributes of the operation. All operands are
+  /// Emits the operands and attributes of the operation. All operands are
  /// emitted first and then all attributes in alphabetical order.
  LogicalResult emitOperandsAndAttributes(Operation &op,
                                          ArrayRef<StringRef> exclude = {});
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.cpp
@@ -320,7 +320,7 @@
                             kDeviceCopyinFlag | kHoldFlag, flags, names,
                             index, mapperAllocas)))
    return failure();
 
-  // TODO copyin readonly currenlty handled as copyin. Update when extension
+  // TODO copyin readonly currently handled as copyin. Update when extension
  // available.
  if (failed(processOperands(builder, moduleTranslation, op,
                             op.copyinReadonlyOperands(), totalNbOperand,
@@ -333,7 +333,7 @@
                             kHostCopyoutFlag | kHoldFlag, flags, names, index,
                             mapperAllocas)))
    return failure();
 
-  // TODO copyout zero currenlty handled as copyout. Update when extension
+  // TODO copyout zero currently handled as copyout. Update when extension
  // available.
  if (failed(processOperands(builder, moduleTranslation, op,
                             op.copyoutZeroOperands(), totalNbOperand,
@@ -346,7 +346,7 @@
                             kCreateFlag | kHoldFlag, flags, names, index,
                             mapperAllocas)))
    return failure();
 
-  // TODO create zero currenlty handled as create. Update when extension
+  // TODO create zero currently handled as create. Update when extension
  // available.
  if (failed(processOperands(builder, moduleTranslation, op,
                             op.createZeroOperands(), totalNbOperand,
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -546,7 +546,7 @@
    // If the lhs or rhs has external uses, the blocks cannot be merged as the
    // merged version of this operation will not be either the lhs or rhs
-    // alone (thus semantically incorrect), but some mix dependending on which
+    // alone (thus semantically incorrect), but some mix depending on which
    // block preceeded this.
    // TODO allow merging of operations when one block does not dominate the
    // other
diff --git a/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py b/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py
--- a/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py
+++ b/mlir/python/mlir/dialects/linalg/opdsl/lang/affine.py
@@ -101,7 +101,7 @@
      if not self.allow_new_dims:
        raise ValueError(
            f"New dimensions not allowed in the current affine expression: "
-            f"Requested '{dimname}', Availble: {self.all_dims}")
+            f"Requested '{dimname}', Available: {self.all_dims}")
      pos = len(self.all_dims)
      self.all_dims[dimname] = pos
      self.local_dims[dimname] = pos
@@ -114,7 +114,7 @@
      if not self.allow_new_symbols:
        raise ValueError(
            f"New symbols not allowed in the current affine expression: "
-            f"Requested '{symname}', Availble: {self.all_symbols}")
+            f"Requested '{symname}', Available: {self.all_symbols}")
      pos = len(self.all_symbols)
      self.all_symbols[symname] = pos
      self.local_symbols[symname] = pos
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -24,7 +24,7 @@
 
 // -----
 
-// Same as above but with fp32 acumulation type.
+// Same as above but with fp32 accumulation type.
 // CHECK-LABEL: @m16n8k16_fp16_fp32
 func.func @m16n8k16_fp16_fp32(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>,
                               %arg2: vector<2x2xf32>) -> vector<2x2xf32> {
diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py
--- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py
@@ -947,7 +947,7 @@
 class _SparseValueInfo(enum.Enum):
  """Describes how a sparse tensor value is stored.
 
-  _UNPACKED: The sparse tensor value is stored as (coordnates, values) in
+  _UNPACKED: The sparse tensor value is stored as (coordinates, values) in
    Python.
  _PACKED: The sparse tensor value is stored as a C pointer to a packed MLIR
    sparse tensor.
@@ -1165,7 +1165,7 @@
  def to_array(self) -> np.ndarray:
    """Returns the numpy array for the Tensor.
 
-    This is currenly only implemented for dense Tensor.
+    This is currently only implemented for dense Tensor.
""" if not self.is_dense(): raise ValueError("Conversion from non-dense Tensor " diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp --- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp +++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp @@ -502,7 +502,7 @@ // {3}: documentation (summary + description) // {4}: op attribute list // {5}: builder methods taking standalone attribute parameters -// {6}: additional method defintions +// {6}: additional method definitions // {7}: additional methods for attributes used by indexing maps static const char structuredOpOdsHeaderFormat[] = R"FMT( //===----------------------------------------------------------------------===// diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp --- a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp @@ -434,7 +434,7 @@ using IteratorT = ParameterElement *const *; IteratorT it = params.begin(); - // Find the last required parameter. Commas become optional aftewards. + // Find the last required parameter. Commas become optional afterwards. // Note: IteratorT's copy assignment is deleted. ParameterElement *lastReq = nullptr; for (ParameterElement *param : params) diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp --- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp @@ -172,7 +172,7 @@ /// This class represents a group of order-independent optional clauses. Each /// clause starts with a literal element and has a coressponding parsing -/// element. A parsing element is a continous sequence of format elements. +/// element. A parsing element is a continuous sequence of format elements. /// Each clause can appear 0 or 1 time. class OIListElement : public DirectiveElementBase { public: