diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp --- a/mlir/lib/Analysis/AffineStructures.cpp +++ b/mlir/lib/Analysis/AffineStructures.cpp @@ -3346,7 +3346,7 @@ newSyms->append(syms.begin(), syms.end()); } - for (auto operand : llvm::enumerate(operands)) { + for (const auto &operand : llvm::enumerate(operands)) { // Compute replacement dim/sym of operand. AffineExpr replacement; auto dimIt = std::find(dims.begin(), dims.end(), operand.value()); diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp --- a/mlir/lib/Analysis/LoopAnalysis.cpp +++ b/mlir/lib/Analysis/LoopAnalysis.cpp @@ -353,7 +353,8 @@ // Work backwards over the body of the block so that the shift of a use's // ancestor operation in the block gets recorded before it's looked up. DenseMap forBodyShift; - for (auto it : llvm::enumerate(llvm::reverse(forBody->getOperations()))) { + for (const auto &it : + llvm::enumerate(llvm::reverse(forBody->getOperations()))) { auto &op = it.value(); // Get the index of the current operation, note that we are iterating in diff --git a/mlir/lib/Analysis/NumberOfExecutions.cpp b/mlir/lib/Analysis/NumberOfExecutions.cpp --- a/mlir/lib/Analysis/NumberOfExecutions.cpp +++ b/mlir/lib/Analysis/NumberOfExecutions.cpp @@ -52,7 +52,7 @@ // Query RegionBranchOpInterface interface if it is available. if (auto regionInterface = dyn_cast(parentOp)) { SmallVector operands(parentOp->getNumOperands()); - for (auto operandIt : llvm::enumerate(parentOp->getOperands())) + for (const auto &operandIt : llvm::enumerate(parentOp->getOperands())) matchPattern(operandIt.value(), m_Constant(&operands[operandIt.index()])); regionInterface.getNumRegionInvocations(operands, numRegionsInvocations); diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp --- a/mlir/lib/Analysis/SliceAnalysis.cpp +++ b/mlir/lib/Analysis/SliceAnalysis.cpp @@ -86,7 +86,7 @@ if (filter && !filter(op)) return; - for (auto en : llvm::enumerate(op->getOperands())) { + for (const auto &en : llvm::enumerate(op->getOperands())) { auto operand = en.value(); if (auto *definingOp = operand.getDefiningOp()) { if (backwardSlice->count(definingOp) == 0) diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp --- a/mlir/lib/Bindings/Python/IRCore.cpp +++ b/mlir/lib/Bindings/Python/IRCore.cpp @@ -1154,7 +1154,7 @@ resultTypes.reserve(resultTypeList.size()); if (resultSegmentSpecObj.is_none()) { // Non-variadic result unpacking. - for (auto it : llvm::enumerate(resultTypeList)) { + for (const auto &it : llvm::enumerate(resultTypeList)) { try { resultTypes.push_back(py::cast(it.value())); if (!resultTypes.back()) @@ -1178,7 +1178,7 @@ .str()); } resultSegmentLengths.reserve(resultTypeList.size()); - for (auto it : + for (const auto &it : llvm::enumerate(llvm::zip(resultTypeList, resultSegmentSpec))) { int segmentSpec = std::get<1>(it.value()); if (segmentSpec == 1 || segmentSpec == 0) { @@ -1239,7 +1239,7 @@ operands.reserve(operands.size()); if (operandSegmentSpecObj.is_none()) { // Non-sized operand unpacking. 
- for (auto it : llvm::enumerate(operandList)) { + for (const auto &it : llvm::enumerate(operandList)) { try { operands.push_back(py::cast(it.value())); if (!operands.back()) @@ -1263,7 +1263,7 @@ .str()); } operandSegmentLengths.reserve(operandList.size()); - for (auto it : + for (const auto &it : llvm::enumerate(llvm::zip(operandList, operandSegmentSpec))) { int segmentSpec = std::get<1>(it.value()); if (segmentSpec == 1 || segmentSpec == 0) { diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp --- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp @@ -22,7 +22,7 @@ SmallVector workgroupBuffers; workgroupBuffers.reserve(gpuFuncOp.getNumWorkgroupAttributions()); - for (auto en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) { + for (const auto &en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) { Value attribution = en.value(); auto type = attribution.getType().dyn_cast(); @@ -89,7 +89,7 @@ if (!workgroupBuffers.empty()) zero = rewriter.create(loc, i32Type, rewriter.getI32IntegerAttr(0)); - for (auto en : llvm::enumerate(workgroupBuffers)) { + for (const auto &en : llvm::enumerate(workgroupBuffers)) { LLVM::GlobalOp global = en.value(); Value address = rewriter.create(loc, global); auto elementType = @@ -112,7 +112,7 @@ // Rewrite private memory attributions to alloca'ed buffers. unsigned numWorkgroupAttributions = gpuFuncOp.getNumWorkgroupAttributions(); auto int64Ty = IntegerType::get(rewriter.getContext(), 64); - for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) { + for (const auto &en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) { Value attribution = en.value(); auto type = attribution.getType().cast(); assert(type && type.hasStaticShape() && "unexpected type in attribution"); diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp --- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp @@ -634,7 +634,7 @@ arraySize, /*alignment=*/0); auto zero = builder.create(loc, llvmInt32Type, builder.getI32IntegerAttr(0)); - for (auto en : llvm::enumerate(arguments)) { + for (const auto &en : llvm::enumerate(arguments)) { auto index = builder.create( loc, llvmInt32Type, builder.getI32IntegerAttr(en.index())); auto fieldPtr = builder.create( diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp --- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp +++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp @@ -206,7 +206,7 @@ // LowerABIAttributesPass. 
TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs()); { - for (auto argType : enumerate(funcOp.getType().getInputs())) { + for (const auto &argType : enumerate(funcOp.getType().getInputs())) { auto convertedType = typeConverter.convertType(argType.value()); signatureConverter.addInputs(argType.index(), convertedType); } diff --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp --- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp +++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp @@ -222,7 +222,7 @@ Value descriptorSet = builder.create( loc, getInt32Type(), builder.getI32IntegerAttr(0)); - for (auto en : + for (const auto &en : llvm::enumerate(cInterfaceVulkanLaunchCallOp.getOperands().drop_front( kVulkanLaunchNumConfigOperands))) { // Create LLVM constant for the descriptor binding index. diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp --- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp +++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp @@ -213,11 +213,11 @@ createIndexConstant(rewriter, loc, 0)); // Fields 4: Sizes. - for (auto en : llvm::enumerate(sizes)) + for (const auto &en : llvm::enumerate(sizes)) memRefDescriptor.setSize(rewriter, loc, en.index(), en.value()); // Field 5: Strides. - for (auto en : llvm::enumerate(strides)) + for (const auto &en : llvm::enumerate(strides)) memRefDescriptor.setStride(rewriter, loc, en.index(), en.value()); return memRefDescriptor; diff --git a/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp b/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp --- a/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp +++ b/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp @@ -101,7 +101,7 @@ // For this unrolled `position` corresponding to the `linearIndex`^th // element, extract operand vectors SmallVector extractedOperands; - for (auto operand : llvm::enumerate(operands)) { + for (const auto &operand : llvm::enumerate(operands)) { extractedOperands.push_back(rewriter.create( loc, operand1DVectorTypes[operand.index()], operand.value(), position)); diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -1417,7 +1417,8 @@ targetMemRef.setOffset(rewriter, loc, viewMemRef.offset(rewriter, loc)); // Iterate over the dimensions and apply size/stride permutation. - for (auto en : llvm::enumerate(transposeOp.permutation().getResults())) { + for (const auto &en : + llvm::enumerate(transposeOp.permutation().getResults())) { int sourcePos = en.index(); int targetPos = en.value().cast().getPosition(); targetMemRef.setSize(rewriter, loc, targetPos, diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp @@ -736,7 +736,7 @@ bool seenVariableLength = false; Type valueTy = builder.getType(); Type valueRangeTy = pdl::RangeType::get(valueTy); - for (auto it : llvm::enumerate(resultTys)) { + for (const auto &it : llvm::enumerate(resultTys)) { Value &type = rewriteValues[it.value()]; if (type) continue; @@ -862,7 +862,7 @@ // Otherwise, handle inference for each of the result types individually. 
OperandRange resultTypeValues = op.types(); types.reserve(resultTypeValues.size()); - for (auto it : llvm::enumerate(resultTypeValues)) { + for (const auto &it : llvm::enumerate(resultTypeValues)) { Value resultType = it.value(); // Check for an already translated value. diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp @@ -162,7 +162,7 @@ builder.getAllOperands(opPos)); } else { bool foundVariableLength = false; - for (auto operandIt : llvm::enumerate(operands)) { + for (const auto &operandIt : llvm::enumerate(operands)) { bool isVariadic = operandIt.value().getType().isa(); foundVariableLength |= isVariadic; @@ -460,7 +460,7 @@ } // Default case: visit all the operands. - for (auto p : llvm::enumerate(operationOp.operands())) + for (const auto &p : llvm::enumerate(operationOp.operands())) toVisit.emplace(p.value(), entry.value, p.index(), entry.depth + 1); }) diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp --- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp +++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp @@ -261,7 +261,7 @@ builder.setInsertionPointToStart(&launchOp.body().front()); auto *lbArgumentIt = lbs.begin(); auto *stepArgumentIt = steps.begin(); - for (auto en : llvm::enumerate(ivs)) { + for (const auto &en : llvm::enumerate(ivs)) { Value id = en.index() < numBlockDims ? getDim3Value(launchOp.getBlockIds(), en.index()) diff --git a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp --- a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp +++ b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp @@ -387,7 +387,7 @@ // the before region, which may not matching the whole op's result. Instead, // the scf.condition op returns values matching the whole op's results. So we // need to create/load/store variables according to that. - for (auto it : llvm::enumerate(condArgs)) { + for (const auto &it : llvm::enumerate(condArgs)) { auto res = it.value(); auto i = it.index(); auto pointerType = diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp --- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp +++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp @@ -208,7 +208,7 @@ SmallVector copyInfo; auto numKernelOperands = launchOp.getNumKernelOperands(); auto kernelOperands = adaptor.getOperands().take_back(numKernelOperands); - for (auto operand : llvm::enumerate(kernelOperands)) { + for (const auto &operand : llvm::enumerate(kernelOperands)) { // Check if the kernel's operand is a ranked memref. 
auto memRefType = launchOp.getKernelOperand(operand.index()) .getType() diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp --- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp +++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp @@ -254,7 +254,7 @@ rewriter.getNamedAttr(function_like_impl::getArgDictAttrName(), rewriter.getArrayAttr(newArgAttrs))); } - for (auto pair : llvm::enumerate(attributes)) { + for (const auto &pair : llvm::enumerate(attributes)) { if (pair.value().getName() == "llvm.linkage") { attributes.erase(attributes.begin() + pair.index()); break; diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -725,7 +725,7 @@ SmallVector newShape; SmallVector affineExprs; newShape.reserve(type.getRank()); - for (auto it : llvm::enumerate(type.getShape())) { + for (const auto &it : llvm::enumerate(type.getShape())) { if (it.value() == resultTy.getDimSize(it.index())) { newShape.push_back(it.value()); affineExprs.push_back( @@ -1716,7 +1716,7 @@ SmallVector inputExprs; inputExprs.resize(resultTy.getRank()); auto operandTy = input.getType().cast(); - for (auto permutation : llvm::enumerate(perms.getValues())) { + for (const auto &permutation : llvm::enumerate(perms.getValues())) { auto index = permutation.index(); auto value = permutation.value().getZExtValue(); if (!operandTy.hasRank() || operandTy.isDynamicDim(index)) { diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -451,7 +451,7 @@ llvm::DenseMap &valueMapping) { SmallVector newOperands; SmallVector> argMapping; - for (auto operand : llvm::enumerate(op.getIterOperands())) { + for (const auto &operand : llvm::enumerate(op.getIterOperands())) { auto it = valueMapping.find(operand.value()); if (it == valueMapping.end()) continue; @@ -476,7 +476,7 @@ OpBuilder b(op); auto loop = cast(op->getParentOp()); auto yieldOperands = llvm::to_vector<4>(op.getOperands()); - for (auto operand : llvm::enumerate(op.getOperands())) { + for (const auto &operand : llvm::enumerate(op.getOperands())) { auto it = valueMapping.find(operand.value()); if (it == valueMapping.end()) continue; diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -497,7 +497,7 @@ eltType = llvmType.cast().getElementType(); Value insert = rewriter.create(loc, llvmType); int64_t insPos = 0; - for (auto en : llvm::enumerate(maskArrayAttr)) { + for (const auto &en : llvm::enumerate(maskArrayAttr)) { int64_t extPos = en.value().cast().getInt(); Value value = adaptor.v1(); if (extPos >= v1Dim) { @@ -883,7 +883,8 @@ desc.setOffset(rewriter, loc, zero); // Fill size and stride descriptors in memref. 
- for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) { + for (const auto &indexedSize : + llvm::enumerate(targetMemRefType.getShape())) { int64_t index = indexedSize.index(); auto sizeAttr = rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value()); diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp --- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp +++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp @@ -681,7 +681,7 @@ for (auto *container : {&dims, &syms}) { bool isDim = (container == &dims); auto &repls = isDim ? dimReplacements : symReplacements; - for (auto en : llvm::enumerate(*container)) { + for (const auto &en : llvm::enumerate(*container)) { Value v = en.value(); if (!v) { assert(isDim ? !map->isFunctionOfDim(en.index()) diff --git a/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp b/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp --- a/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp +++ b/mlir/lib/Dialect/Async/Transforms/AsyncRuntimeRefCounting.cpp @@ -418,7 +418,7 @@ continue; // Update terminator `successor` block to `refCountingBlock`. - for (auto pair : llvm::enumerate(terminator->getSuccessors())) + for (const auto &pair : llvm::enumerate(terminator->getSuccessors())) if (pair.value() == successor) terminator->setSuccessor(refCountingBlock, pair.index()); } diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -940,7 +940,7 @@ .attachNote(function.getLoc()) .append("return type declared here"); - for (auto pair : llvm::enumerate( + for (const auto &pair : llvm::enumerate( llvm::zip(function.getType().getResults(), returnOp.operands()))) { Type type; Value operand; diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp --- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp @@ -54,7 +54,7 @@ createForAllDimensions(builder, loc, indexOps); // Replace the leading 12 function args with the respective thread/block index // operations. Iterate backwards since args are erased and indices change. - for (auto indexOp : enumerate(indexOps)) + for (const auto &indexOp : enumerate(indexOps)) map.map(firstBlock.getArgument(indexOp.index()), indexOp.value()); } @@ -173,7 +173,7 @@ // Map arguments from gpu.launch region to the arguments of the gpu.func // operation. Block &entryBlock = outlinedFuncBody.front(); - for (auto operand : enumerate(operands)) + for (const auto &operand : enumerate(operands)) map.map(operand.value(), entryBlock.getArgument(operand.index())); // Clone the region of the gpu.launch operation into the gpu.func operation. diff --git a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp --- a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp @@ -89,7 +89,7 @@ }); // Map the innermost loops to threads in reverse order. 
- for (auto en : + for (const auto &en : llvm::enumerate(llvm::reverse(llvm::makeArrayRef(ivs).take_back( GPUDialect::getNumWorkgroupDimensions())))) { Value v = en.value(); diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -1484,7 +1484,7 @@ // list is parsed, returns -1. static int parseOptionalKeywordAlternative(OpAsmParser &parser, ArrayRef keywords) { - for (auto en : llvm::enumerate(keywords)) { + for (const auto &en : llvm::enumerate(keywords)) { if (succeeded(parser.parseOptionalKeyword(en.value()))) return en.index(); } diff --git a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp --- a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp +++ b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp @@ -103,7 +103,7 @@ LinalgDependenceGraph::LinalgDependenceGraph(Aliases &aliases, ArrayRef ops) : aliases(aliases), linalgOps(ops.begin(), ops.end()) { - for (auto en : llvm::enumerate(linalgOps)) { + for (const auto &en : llvm::enumerate(linalgOps)) { linalgOpPositions.insert( std::make_pair(en.value().getOperation(), en.index())); } diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -1093,7 +1093,7 @@ return op.emitError("expected the block to have ") << rank << " arguments"; // Note: the number and type of yield values are checked in the YieldOp. - for (auto en : llvm::enumerate(block.getArgumentTypes())) { + for (const auto &en : llvm::enumerate(block.getArgumentTypes())) { if (!en.value().isIndex()) return op.emitOpError("expected block argument ") << (en.index() + 1) << " to be an index"; @@ -1204,7 +1204,7 @@ SmallVector low, high; auto rankedTensorType = type.cast(); assert(rankedTensorType.hasStaticShape()); - for (auto en : enumerate(rankedTensorType.getShape())) { + for (const auto &en : enumerate(rankedTensorType.getShape())) { AffineExpr d0; bindDims(b.getContext(), d0); auto dimOp = b.createOrFold(loc, source, en.index()); @@ -1275,7 +1275,7 @@ // Initialize all the ranges to {zero, one, one}. All the `ub`s are // overwritten. SmallVector loopRanges(reifiedShapes[0].size(), {zero, one, one}); - for (auto ub : enumerate(reifiedShapes[0])) + for (const auto &ub : enumerate(reifiedShapes[0])) loopRanges[ub.index()].size = ub.value(); return loopRanges; } @@ -2001,7 +2001,7 @@ // Store ids of the corresponding old and new input operands. SmallVector oldInputIdToNew(tiledLoop.inputs().size(), kNoMatch); - for (auto en : llvm::enumerate( + for (const auto &en : llvm::enumerate( llvm::zip(tiledLoop.inputs(), tiledLoop.getRegionInputArgs()))) { Value in, bbArg; size_t index = en.index(); @@ -2215,7 +2215,7 @@ SmallVector oldResultIdToNew(tiledLoop.getNumResults(), kNoMatch); SmallVector resultReplacement(tiledLoop.getNumResults()); - for (auto en : llvm::enumerate( + for (const auto &en : llvm::enumerate( llvm::zip(tiledLoop.outputs(), tiledLoop.getRegionOutputArgs()))) { size_t index = en.index(); Value out = std::get<0>(en.value()); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp @@ -43,7 +43,7 @@ // Allocate a buffer for every tensor result. 
assert(linalgOp.getNumOutputs() == linalgOp->getNumResults()); - for (auto en : llvm::enumerate(linalgOp->getResultTypes())) { + for (const auto &en : llvm::enumerate(linalgOp->getResultTypes())) { size_t resultIndex = en.index(); Type resultType = en.value(); diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp @@ -186,7 +186,7 @@ DenseSet unitDims; SmallVector unitDimsReductionLoops; ArrayAttr iteratorTypes = genericOp.iterator_types(); - for (auto expr : enumerate(invertedMap.getResults())) { + for (const auto &expr : enumerate(invertedMap.getResults())) { if (AffineDimExpr dimExpr = expr.value().dyn_cast()) if (dims[dimExpr.getPosition()] == 1) unitDims.insert(expr.index()); @@ -205,7 +205,7 @@ // Compute the iterator types of the modified op by dropping the one-trip // count loops. SmallVector newIteratorTypes; - for (auto attr : llvm::enumerate(iteratorTypes)) { + for (const auto &attr : llvm::enumerate(iteratorTypes)) { if (!unitDims.count(attr.index())) newIteratorTypes.push_back(attr.value()); } @@ -439,7 +439,7 @@ // If any result tensor has a modified shape, then add reshape to recover // the original shape. SmallVector resultReplacements; - for (auto result : llvm::enumerate(replacementOp.getResults())) { + for (const auto &result : llvm::enumerate(replacementOp.getResults())) { unsigned index = result.index() + replacementOp.getNumInputs(); auto origResultType = genericOp.getResult(result.index()).getType(); @@ -465,7 +465,7 @@ getReassociationMapForFoldingUnitDims(ArrayRef mixedSizes) { SmallVector reassociation; ReassociationIndices curr; - for (auto it : llvm::enumerate(mixedSizes)) { + for (const auto &it : llvm::enumerate(mixedSizes)) { auto dim = it.index(); auto size = it.value(); curr.push_back(dim); diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @@ -563,7 +563,7 @@ // dimension of the original op. SmallVector numExpandedDims(fusedIndexMap.getNumDims(), 1); expandedShapeMap.resize(fusedIndexMap.getNumDims()); - for (auto resultExpr : llvm::enumerate(fusedIndexMap.getResults())) { + for (const auto &resultExpr : llvm::enumerate(fusedIndexMap.getResults())) { unsigned pos = resultExpr.value().cast().getPosition(); AffineMap foldedDims = reassociationMaps[resultExpr.index()]; numExpandedDims[pos] = foldedDims.getNumResults(); @@ -579,7 +579,7 @@ // Compute reassociation map from the original op to the expanded op. unsigned sum = 0; reassociation.reserve(fusedIndexMap.getNumDims()); - for (auto numFoldedDim : llvm::enumerate(numExpandedDims)) { + for (const auto &numFoldedDim : llvm::enumerate(numExpandedDims)) { auto seq = llvm::seq(sum, sum + numFoldedDim.value()); reassociation.emplace_back(seq.begin(), seq.end()); sum += numFoldedDim.value(); @@ -859,7 +859,7 @@ if (!genericOp.hasTensorSemantics()) return failure(); SmallVector inputOperands = genericOp.getInputOperands(); - for (auto en : llvm::enumerate(inputOperands)) { + for (const auto &en : llvm::enumerate(inputOperands)) { auto reshapeOp = en.value()->get().getDefiningOp(); if (!reshapeOp) continue; @@ -974,7 +974,7 @@ // 1. Look for tensor_expand_shape operands and figure out save the // dimensions merged. 
SmallVector inputOperands = genericOp.getInputOperands(); - for (auto en : llvm::enumerate(inputOperands)) { + for (const auto &en : llvm::enumerate(inputOperands)) { auto reshapeOp = en.value()->get().template getDefiningOp(); if (!reshapeOp) @@ -1008,7 +1008,7 @@ // 2. Verify that we can merge the dimensions in the linalg and that we // don't need to create new reshapes operands. Inserting new reshape // operands would defeat the purpose of the transformation. - for (auto en : llvm::enumerate(inputOperands)) { + for (const auto &en : llvm::enumerate(inputOperands)) { if (en.value()->get() == newOperands[en.index()]) { AffineMap map = genericOp.getTiedIndexingMap(en.value()); for (unsigned i : llvm::seq(unsigned(0), map.getNumResults())) { @@ -1058,7 +1058,7 @@ newOp.region().begin()); // 6. Reshape the so that the type matches the uses. SmallVector newResults; - for (auto result : llvm::enumerate(newOp->getResults())) { + for (const auto &result : llvm::enumerate(newOp->getResults())) { newResults.push_back(rewriter.create( genericOp->getLoc(), genericOp.getOutputTensorTypes()[result.index()], result.value(), reassociation)); @@ -1405,7 +1405,7 @@ // All inputs should be constants. int numInputs = genericOp.getNumInputs(); SmallVector inputValues(numInputs); - for (auto operand : llvm::enumerate(genericOp.getInputOperands())) { + for (const auto &operand : llvm::enumerate(genericOp.getInputOperands())) { if (!matchPattern(operand.value()->get(), m_Constant(&inputValues[operand.index()]))) return failure(); @@ -1710,7 +1710,7 @@ continue; modifiedOutput = true; SmallVector dynamicDims; - for (auto dim : llvm::enumerate(operandType.getShape())) { + for (const auto &dim : llvm::enumerate(operandType.getShape())) { if (dim.value() != ShapedType::kDynamicSize) continue; dynamicDims.push_back(rewriter.createOrFold( diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp @@ -89,7 +89,7 @@ LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange map: " << map << "\n"); SmallVector shapeRanges(map.getNumResults(), nullptr); - for (auto en : llvm::enumerate(map.getResults())) { + for (const auto &en : llvm::enumerate(map.getResults())) { auto dimExpr = en.value().dyn_cast(); if (!dimExpr) continue; @@ -252,7 +252,7 @@ LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n"); DenseMap fusedLoopsAndRanges; Value shapedOperand = consumerOpOperand.get(); - for (auto en : llvm::enumerate(producerMap.getResults())) { + for (const auto &en : llvm::enumerate(producerMap.getResults())) { unsigned posInProducerLoop = en.value().cast().getPosition(); fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape( b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index()); @@ -523,7 +523,7 @@ static AffineMap pruneReductionDimsFromMap(ArrayRef iteratorTypes, AffineMap map) { llvm::SmallDenseSet projectedDims; - for (auto attr : llvm::enumerate(iteratorTypes)) { + for (const auto &attr : llvm::enumerate(iteratorTypes)) { if (!isParallelIterator(attr.value())) projectedDims.insert(attr.index()); } @@ -812,7 +812,7 @@ SmallVector fusedOps(fusionCandidates.size()); DenseMap origOpToFusedOp; origOpToFusedOp[rootOp.getOperation()] = tiledOp; - for (auto candidate : enumerate(llvm::reverse(fusionCandidates))) { + for (const auto &candidate : enumerate(llvm::reverse(fusionCandidates))) { LinalgOp origOp = candidate.value(); LinalgOp fusedOp = 
fuse(b, origOp, fusedLoopsAndRanges); origOpToFusedOp[origOp.getOperation()] = fusedOp; diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp @@ -42,7 +42,7 @@ // Search the slice dimensions tiled by a tile loop dimension. DenseSet tiledSliceDimIndices; - for (auto en : enumerate(indexingMap.getResults())) { + for (const auto &en : enumerate(indexingMap.getResults())) { for (auto tiledLoopDim : tiledLoopDims) { if (en.value().isFunctionOfDim(tiledLoopDim)) tiledSliceDimIndices.insert(en.index()); @@ -304,7 +304,7 @@ // Update the root operation and append the loops and tile loop dimensions. rootOp = tiledRootOp->op; tileLoopOps.append(tiledRootOp->loops.begin(), tiledRootOp->loops.end()); - for (auto en : enumerate(tileSizes)) { + for (const auto &en : enumerate(tileSizes)) { // Copy only the tiled loop dimensions with non-zero tile size. if (en.value() == 0) continue; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp @@ -346,7 +346,7 @@ changed = false; func.walk([&](scf::ForOp forOp) { Operation *yield = forOp.getBody()->getTerminator(); - for (auto it : llvm::enumerate(forOp.getRegionIterArgs())) { + for (const auto &it : llvm::enumerate(forOp.getRegionIterArgs())) { OpOperand &ret = yield->getOpOperand(it.index()); HoistableWrite write = getLoopInvariantTransferWriteOpDefining(forOp, ret); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp @@ -277,7 +277,7 @@ // Collect loop control parameters for parallel and sequential dimensions. SmallVector seqLBs, seqUBs, seqSteps, seqIVs; SmallVector parLBs, parUBs, parSteps, parIVs; - for (auto en : llvm::enumerate( + for (const auto &en : llvm::enumerate( llvm::zip(tiledLoop.lowerBound(), tiledLoop.upperBound(), tiledLoop.step(), tiledLoop.getInductionVars()))) { Value lb, ub, step, iv; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -87,7 +87,7 @@ auto one = b.createOrFold(1); Value allocSize = one; - for (auto size : llvm::enumerate(boundingSubViewSize)) + for (const auto &size : llvm::enumerate(boundingSubViewSize)) allocSize = b.createOrFold(allocSize, size.value()); Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize, layout, alignment); @@ -219,7 +219,7 @@ SmallVector partialSizes; fullSizes.reserve(rank); partialSizes.reserve(rank); - for (auto en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) { + for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) { auto rangeValue = en.value(); // Try to extract a tight constant. 
LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n"); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp @@ -179,7 +179,7 @@ b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes); SmallVector iteratorTypes; - for (auto attr : + for (const auto &attr : enumerate(op.iterator_types().cast().getValue())) { if (loopIndexToRangeIndex.count(attr.index())) iteratorTypes.push_back(attr.value()); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -193,7 +193,7 @@ SmallVector staticSizes; staticSizes.reserve(shape.size()); auto shapedOp = cast(sliceOp.getOperation()); - for (auto en : enumerate(shapedOp.getMixedSizes())) { + for (const auto &en : enumerate(shapedOp.getMixedSizes())) { // Skip dropped dimensions. if (droppedDims.contains(en.index())) continue; @@ -268,7 +268,7 @@ // linalg op around because it uses the dims of the original results. SmallVector paddedSubviewResults; paddedSubviewResults.reserve(opToPad->getNumResults()); - for (auto en : llvm::enumerate(paddedOp->getResults())) { + for (const auto &en : llvm::enumerate(paddedOp->getResults())) { Value paddedResult = en.value(); int64_t resultNumber = en.index(); int64_t rank = paddedResult.getType().cast().getRank(); @@ -441,7 +441,7 @@ // Tile the unfused loops; SmallVector unfusedLoopTileSizes; Value zero = rewriter.create(op->getLoc(), 0); - for (auto tileSize : enumerate(tileSizes)) { + for (const auto &tileSize : enumerate(tileSizes)) { if (tiledAndFusedOps->fusedLoopDims.count(tileSize.index())) unfusedLoopTileSizes.push_back(zero); else @@ -522,7 +522,7 @@ } // Hoist the padding. - for (auto en : enumerate(depths)) { + for (const auto &en : enumerate(depths)) { OpOperand &opOperand = paddedOp->getOpOperand(en.index()); auto padTensorOp = opOperand.get().getDefiningOp(); if (!padTensorOp || en.value() == 0) diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -256,7 +256,7 @@ auto yieldOp = dyn_cast(op); if (!yieldOp) return VectorizationResult{VectorizationStatus::Failure, nullptr}; - for (auto outputs : llvm::enumerate(yieldOp.values())) { + for (const auto &outputs : llvm::enumerate(yieldOp.values())) { // TODO: Scan for an opportunity for reuse. // TODO: use a map. 
Value vectorValue = bvm.lookup(outputs.value()); diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp --- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp @@ -169,7 +169,7 @@ SmallVector getDynOperands(Location loc, Value val, OpBuilder &b) { SmallVector dynOperands; auto shapedType = val.getType().cast(); - for (auto dim : llvm::enumerate(shapedType.getShape())) { + for (const auto &dim : llvm::enumerate(shapedType.getShape())) { if (dim.value() == ShapedType::kDynamicSize) dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index())); } @@ -310,7 +310,7 @@ SmallVector foldedOffsets(offsets.begin(), offsets.end()); AffineExpr dim1, dim2; bindDims(b.getContext(), dim1, dim2); - for (auto en : enumerate(producerOp.getMixedOffsets())) { + for (const auto &en : enumerate(producerOp.getMixedOffsets())) { SmallVector offsetValues = { getValueOrCreateConstantIndexOp(b, loc, foldedOffsets[en.index()]), getValueOrCreateConstantIndexOp(b, loc, en.value())}; @@ -403,7 +403,7 @@ if (distributionOptions.hasValue()) { // Collect loop ranges for parallel dimensions. SmallVector parallelLoopRanges; - for (auto iteratorType : enumerate(iteratorTypes)) + for (const auto &iteratorType : enumerate(iteratorTypes)) if (isParallelIterator(iteratorType.value())) parallelLoopRanges.push_back(loopRanges[iteratorType.index()]); @@ -435,7 +435,7 @@ // Filter out scf.for loops that were created out of parallel dimensions. SmallVector loops; - for (auto iteratorType : enumerate(iteratorTypes)) + for (const auto &iteratorType : enumerate(iteratorTypes)) if (isParallelIterator(iteratorType.value())) loops.push_back(loopNest.loops[iteratorType.index()]); @@ -677,7 +677,7 @@ distributionMethod.assign(distributionOptions->distributionMethod.begin(), distributionOptions->distributionMethod.end()); SmallVector parallelLoopRanges; - for (auto iteratorType : enumerate(iteratorTypes)) { + for (const auto &iteratorType : enumerate(iteratorTypes)) { if (isParallelIterator(iteratorType.value())) parallelLoopRanges.push_back(loopRanges[iteratorType.index()]); } @@ -686,7 +686,7 @@ SmallVector procInfo = options.procInfo(b, loc, parallelLoopRanges); unsigned index = 0; - for (auto iteratorType : enumerate(iteratorTypes)) { + for (const auto &iteratorType : enumerate(iteratorTypes)) { if (index >= procInfo.size()) break; if (isParallelIterator(iteratorType.value())) { diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp --- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp +++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp @@ -134,7 +134,7 @@ auto offsets = delinearize(strides, i); SmallVector extracted(expandedOperands.size()); - for (auto tuple : llvm::enumerate(expandedOperands)) + for (const auto &tuple : llvm::enumerate(expandedOperands)) extracted[tuple.index()] = builder.create(tuple.value(), offsets); diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -395,7 +395,7 @@ }; if (!checkCompatible(aOffset, bOffset)) return false; - for (auto aStride : enumerate(aStrides)) + for (const auto &aStride : enumerate(aStrides)) if (!checkCompatible(aStride.value(), bStrides[aStride.index()])) return false; } @@ -518,7 +518,7 @@ if (originalType.getRank() == reducedType.getRank()) return unusedDims; - 
for (auto dim : llvm::enumerate(sizes)) + for (const auto &dim : llvm::enumerate(sizes)) if (auto attr = dim.value().dyn_cast()) if (attr.cast().getInt() == 1) unusedDims.insert(dim.index()); @@ -1869,7 +1869,7 @@ if (!unusedDims) return nullptr; SmallVector shape; - for (auto sizes : llvm::enumerate(nonRankReducedType.getShape())) { + for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) { if (unusedDims->count(sizes.index())) continue; shape.push_back(sizes.value()); @@ -2001,7 +2001,7 @@ auto originalSizes = memRefType.getShape(); // Compute permuted sizes. SmallVector sizes(rank, 0); - for (auto en : llvm::enumerate(permutationMap.getResults())) + for (const auto &en : llvm::enumerate(permutationMap.getResults())) sizes[en.index()] = originalSizes[en.value().cast().getPosition()]; diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -249,7 +249,7 @@ SmallVectorImpl> &modifiers) { if (modifiers.size() > 2) return parser.emitError(parser.getNameLoc()) << " unexpected modifier(s)"; - for (auto mod : modifiers) { + for (const auto &mod : modifiers) { // Translate the string. If it has no value, then it was not a valid // modifier! auto symbol = symbolizeScheduleModifier(mod); diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp --- a/mlir/lib/Dialect/PDL/IR/PDL.cpp +++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp @@ -198,7 +198,7 @@ return success(); // Otherwise, make sure each of the types can be inferred. - for (auto it : llvm::enumerate(resultTypes)) { + for (const auto &it : llvm::enumerate(resultTypes)) { Operation *resultTypeOp = it.value().getDefiningOp(); assert(resultTypeOp && "expected valid result type operation"); diff --git a/mlir/lib/Dialect/SCF/SCF.cpp b/mlir/lib/Dialect/SCF/SCF.cpp --- a/mlir/lib/Dialect/SCF/SCF.cpp +++ b/mlir/lib/Dialect/SCF/SCF.cpp @@ -1248,7 +1248,7 @@ // Replace the operation by the new one. SmallVector repResults(op.getNumResults()); - for (auto en : llvm::enumerate(usedResults)) + for (const auto &en : llvm::enumerate(usedResults)) repResults[en.value().getResultNumber()] = newOp.getResult(en.index()); rewriter.replaceOp(op, repResults); return success(); @@ -1297,7 +1297,8 @@ SmallVector results(op->getNumResults()); assert(thenYieldArgs.size() == results.size()); assert(elseYieldArgs.size() == results.size()); - for (auto it : llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) { + for (const auto &it : + llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) { Value trueVal = std::get<0>(it.value()); Value falseVal = std::get<1>(it.value()); if (trueVal == falseVal) @@ -1565,7 +1566,7 @@ SmallVector prevValues; SmallVector nextValues; - for (auto pair : llvm::enumerate(combinedIf.getResults())) { + for (const auto &pair : llvm::enumerate(combinedIf.getResults())) { if (pair.index() < prevIf.getNumResults()) prevValues.push_back(pair.value()); else @@ -2369,7 +2370,7 @@ SmallVector newResultTypes; SmallVector newTermArgs; bool needUpdate = false; - for (auto it : + for (const auto &it : llvm::enumerate(llvm::zip(op.getResults(), afterArgs, termArgs))) { auto i = static_cast(it.index()); Value result = std::get<0>(it.value()); @@ -2404,7 +2405,7 @@ // null). 
SmallVector newResults(op.getNumResults()); SmallVector newAfterBlockArgs(op.getNumResults()); - for (auto it : llvm::enumerate(newResultsIndices)) { + for (const auto &it : llvm::enumerate(newResultsIndices)) { newResults[it.value()] = newWhile.getResult(it.index()); newAfterBlockArgs[it.value()] = newAfterBlock.getArgument(it.index()); } diff --git a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp --- a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp @@ -69,7 +69,7 @@ // Rewrite uses of the for-loop block arguments to the new while-loop // "after" arguments - for (auto barg : enumerate(forOp.getBody(0)->getArguments())) + for (const auto &barg : enumerate(forOp.getBody(0)->getArguments())) barg.value().replaceAllUsesWith(afterBlock->getArgument(barg.index())); // Inline for-loop body operations into 'after' region. @@ -87,7 +87,7 @@ // an extra value (the induction variable escapes the loop through being // carried in the set of iterargs). Instead, rewrite uses of the forOp // results. - for (auto arg : llvm::enumerate(forOp.getResults())) + for (const auto &arg : llvm::enumerate(forOp.getResults())) arg.value().replaceAllUsesWith(whileOp.getResult(arg.index() + 1)); rewriter.eraseOp(forOp); diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp --- a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp @@ -198,7 +198,7 @@ llvm::SmallVector newLoopArg; // For existing loop argument initialize them with the right version from the // prologue. - for (auto retVal : + for (const auto &retVal : llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) { Operation *def = retVal.value().getDefiningOp(); assert(def && "Only support loop carried dependencies of distance 1"); @@ -245,7 +245,7 @@ rewriter.setInsertionPoint(newForOp.getBody(), newForOp.getBody()->begin()); BlockAndValueMapping mapping; mapping.map(forOp.getInductionVar(), newForOp.getInductionVar()); - for (auto arg : llvm::enumerate(forOp.getRegionIterArgs())) { + for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) { mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]); } for (Operation *op : opOrder) { @@ -325,7 +325,7 @@ yieldOperands.push_back(mapping.lookupOrDefault(it.first)); } // Map the yield operand to the forOp returned value. 
- for (auto retVal : + for (const auto &retVal : llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) { Operation *def = retVal.value().getDefiningOp(); assert(def && "Only support loop carried dependencies of distance 1"); diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp --- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp @@ -160,8 +160,8 @@ ifInbound.getThenRegion().takeBody(op.getRegion()); Block &thenBlock = ifInbound.getThenRegion().front(); b.setInsertionPointToStart(innerLoop.getBody()); - for (auto ivs : llvm::enumerate(llvm::zip(innerLoop.getInductionVars(), - outerLoop.getInductionVars()))) { + for (const auto &ivs : llvm::enumerate(llvm::zip( + innerLoop.getInductionVars(), outerLoop.getInductionVars()))) { auto newIndex = b.create( op.getLoc(), std::get<0>(ivs.value()), std::get<1>(ivs.value())); thenBlock.getArgument(ivs.index()) diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp --- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp @@ -182,7 +182,7 @@ auto indexType = typeConverter.getIndexType(); auto attrName = spirv::getInterfaceVarABIAttrName(); - for (auto argType : llvm::enumerate(funcOp.getType().getInputs())) { + for (const auto &argType : llvm::enumerate(funcOp.getType().getInputs())) { auto abiInfo = funcOp.getArgAttrOfType( argType.index(), attrName); if (!abiInfo) { diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp --- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp @@ -235,7 +235,7 @@ return llvm::None; int64_t memrefSize = -1; - for (auto shape : enumerate(dims)) + for (const auto &shape : enumerate(dims)) memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]); return (offset + memrefSize) * elementSize.getValue(); @@ -557,7 +557,7 @@ return failure(); TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs()); - for (auto argType : enumerate(fnType.getInputs())) { + for (const auto &argType : enumerate(fnType.getInputs())) { auto convertedType = getTypeConverter()->convertType(argType.value()); if (!convertedType) return failure(); @@ -777,7 +777,7 @@ Value linearizedIndex = builder.create( loc, integerType, IntegerAttr::get(integerType, offset)); - for (auto index : llvm::enumerate(indices)) { + for (const auto &index : llvm::enumerate(indices)) { Value strideVal = builder.create( loc, integerType, IntegerAttr::get(integerType, strides[index.index()])); diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp --- a/mlir/lib/Dialect/Shape/IR/Shape.cpp +++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp @@ -1669,7 +1669,7 @@ "ReduceOp operates on an extent tensor"); } - for (auto type : llvm::enumerate(op.getInitVals())) + for (const auto &type : llvm::enumerate(op.getInitVals())) if (block.getArgument(type.index() + 2).getType() != type.value().getType()) return op.emitOpError() << "type mismatch between argument " << type.index() + 2 diff --git a/mlir/lib/Dialect/StandardOps/Transforms/DecomposeCallGraphTypes.cpp b/mlir/lib/Dialect/StandardOps/Transforms/DecomposeCallGraphTypes.cpp --- 
a/mlir/lib/Dialect/StandardOps/Transforms/DecomposeCallGraphTypes.cpp +++ b/mlir/lib/Dialect/StandardOps/Transforms/DecomposeCallGraphTypes.cpp @@ -67,7 +67,7 @@ // Convert function arguments using the provided TypeConverter. TypeConverter::SignatureConversion conversion(functionType.getNumInputs()); - for (auto argType : llvm::enumerate(functionType.getInputs())) { + for (const auto &argType : llvm::enumerate(functionType.getInputs())) { SmallVector decomposedTypes; if (failed(typeConverter->convertType(argType.value(), decomposedTypes))) return failure(); diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp --- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp +++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp @@ -1030,7 +1030,7 @@ ArrayRef resultShape = getType().getShape(); SmallVector mixedSizes = getMixedSizes(); unsigned shapePos = 0; - for (auto size : enumerate(mixedSizes)) { + for (const auto &size : enumerate(mixedSizes)) { Optional sizeVal = getConstantIntValue(size.value()); // If the size is not 1, or if the current matched dimension of the result // is the same static shape as the size value (which is 1), then the @@ -1052,7 +1052,7 @@ SmallVector mixedSizes = getMixedSizes(); llvm::SmallDenseSet droppedDims = getDroppedDims(); Location loc = getLoc(); - for (auto size : enumerate(mixedSizes)) { + for (const auto &size : enumerate(mixedSizes)) { if (droppedDims.count(size.index())) continue; if (auto attr = size.value().dyn_cast()) { diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp --- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp +++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp @@ -1812,7 +1812,7 @@ if (resultKnowledge.size() != yieldOp.getNumOperands()) return failure(); - for (auto it : llvm::enumerate(yieldOp.getOperands())) { + for (const auto &it : llvm::enumerate(yieldOp.getOperands())) { int32_t index = it.index(); auto meet = ValueKnowledge::meet( resultKnowledge[index], @@ -1857,7 +1857,7 @@ if (resultKnowledge.size() != yieldOp.getNumOperands()) return failure(); - for (auto it : llvm::enumerate(yieldOp.getOperands())) { + for (const auto &it : llvm::enumerate(yieldOp.getOperands())) { int32_t index = it.index(); if (auto meet = ValueKnowledge::meet( resultKnowledge[index], diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp --- a/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp +++ b/mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp @@ -132,7 +132,7 @@ } for (auto yieldOp : yieldOps) { - for (auto it : llvm::enumerate(yieldOp.getOperands())) { + for (const auto &it : llvm::enumerate(yieldOp.getOperands())) { auto newKnowledge = ValueKnowledge::getKnowledgeFromType(it.value().getType()); yieldTypeInfo[it.index()] = diff --git a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp --- a/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp +++ b/mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp @@ -253,7 +253,7 @@ return true; unsigned nDims = reassociation[0].getNumDims(); unsigned nextExpectedDim = 0; - for (auto it : llvm::enumerate(reassociation)) { + for (const auto &it : llvm::enumerate(reassociation)) { auto m = it.value(); if (m.getNumDims() != nDims || m.getNumSymbols() != 0) { if (invalidIndex) diff --git a/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp b/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp --- a/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp +++ 
b/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp @@ -126,7 +126,7 @@ // 1. Separate reduction and parallel dims. SmallVector parallelDims, parallelShapes; SmallVector reductionDims, reductionShapes; - for (auto it : llvm::enumerate(reductionMask)) { + for (const auto &it : llvm::enumerate(reductionMask)) { int64_t i = it.index(); bool isReduction = it.value(); if (isReduction) { diff --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp --- a/mlir/lib/Dialect/Vector/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/VectorOps.cpp @@ -269,7 +269,7 @@ result.addTypes(targetType); SmallVector reductionDims; - for (auto en : llvm::enumerate(reductionMask)) + for (const auto &en : llvm::enumerate(reductionMask)) if (en.value()) reductionDims.push_back(en.index()); result.addAttribute(getReductionDimsAttrName(), @@ -613,7 +613,7 @@ // that the number of map outputs equals the rank of its associated // vector operand. unsigned numIterators = op.iterator_types().getValue().size(); - for (auto it : llvm::enumerate(op.indexing_maps())) { + for (const auto &it : llvm::enumerate(op.indexing_maps())) { auto index = it.index(); auto map = it.value().cast().getValue(); if (map.getNumSymbols() != 0) @@ -693,7 +693,7 @@ getDimMap(ArrayRef indexingMaps, ArrayAttr iteratorTypes, StringRef targetIteratorTypeName, MLIRContext *context) { std::vector> dimMap; - for (auto it : llvm::enumerate(iteratorTypes)) { + for (const auto &it : llvm::enumerate(iteratorTypes)) { auto iteratorTypeName = it.value().cast().getValue(); if (iteratorTypeName != targetIteratorTypeName) continue; @@ -713,7 +713,7 @@ auto resVectorType = getResultType().dyn_cast(); SmallVector indexingMaps(getIndexingMaps()); SmallVector iterationShape; - for (auto it : llvm::enumerate(iterator_types())) { + for (const auto &it : llvm::enumerate(iterator_types())) { // Search lhs/rhs map results for 'targetExpr'. auto targetExpr = getAffineDimExpr(it.index(), getContext()); auto iteratorTypeName = it.value().cast().getValue(); @@ -736,7 +736,7 @@ std::vector> &iterationIndexMap) { unsigned numMaps = indexing_maps().getValue().size(); iterationIndexMap.resize(numMaps); - for (auto it : llvm::enumerate(indexing_maps())) { + for (const auto &it : llvm::enumerate(indexing_maps())) { auto index = it.index(); auto map = it.value().cast().getValue(); for (unsigned i = 0, e = map.getNumResults(); i < e; ++i) { @@ -932,7 +932,7 @@ if (positionAttr.size() > static_cast(op.getVectorType().getRank())) return op.emitOpError( "expected position attribute of rank smaller than vector rank"); - for (auto en : llvm::enumerate(positionAttr)) { + for (const auto &en : llvm::enumerate(positionAttr)) { auto attr = en.value().dyn_cast(); if (!attr || attr.getInt() < 0 || attr.getInt() >= op.getVectorType().getDimSize(en.index())) @@ -1510,7 +1510,7 @@ return op.emitOpError("mask length mismatch"); // Verify all indices. 
int64_t indexSize = v1Type.getDimSize(0) + v2Type.getDimSize(0); - for (auto en : llvm::enumerate(maskAttr)) { + for (const auto &en : llvm::enumerate(maskAttr)) { auto attr = en.value().dyn_cast(); if (!attr || attr.getInt() < 0 || attr.getInt() >= indexSize) return op.emitOpError("mask index #") @@ -1620,7 +1620,7 @@ (positionAttr.size() != static_cast(destVectorType.getRank()))) return op.emitOpError( "expected position attribute rank to match the dest vector rank"); - for (auto en : llvm::enumerate(positionAttr)) { + for (const auto &en : llvm::enumerate(positionAttr)) { auto attr = en.value().dyn_cast(); if (!attr || attr.getInt() < 0 || attr.getInt() >= destVectorType.getDimSize(en.index())) @@ -2794,7 +2794,7 @@ newIndices.push_back(getValueOrCreateConstantIndexOp( rewriter, extractOp.getLoc(), offset)); } - for (auto it : llvm::enumerate(xferOp.indices())) { + for (const auto &it : llvm::enumerate(xferOp.indices())) { OpFoldResult offset = extractOp.getMixedOffsets()[it.index() + rankReduced]; newIndices.push_back(rewriter.create( @@ -3856,7 +3856,7 @@ if (rank != size) return op.emitOpError("transposition length mismatch: ") << size; SmallVector seen(rank, false); - for (auto ta : llvm::enumerate(transpAttr)) { + for (const auto &ta : llvm::enumerate(transpAttr)) { int64_t i = ta.value().cast().getInt(); if (i < 0 || i >= rank) return op.emitOpError("transposition index out of range: ") << i; @@ -3947,7 +3947,7 @@ // result dimension size. auto resultShape = resultType.getShape(); SmallVector maskDimSizes; - for (auto it : llvm::enumerate(op.mask_dim_sizes())) { + for (const auto &it : llvm::enumerate(op.mask_dim_sizes())) { int64_t attrValue = it.value().cast().getInt(); if (attrValue < 0 || attrValue > resultShape[it.index()]) return op.emitOpError( diff --git a/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp b/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp --- a/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp +++ b/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp @@ -79,7 +79,7 @@ // Apply the reverse transpose to deduce the type of the transfer_read. 
ArrayRef originalShape = op.getVectorType().getShape(); SmallVector newVectorShape(originalShape.size()); - for (auto pos : llvm::enumerate(permutation)) { + for (const auto &pos : llvm::enumerate(permutation)) { newVectorShape[pos.value()] = originalShape[pos.index()]; } diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp --- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp +++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp @@ -53,7 +53,7 @@ static SmallVector adjustIter(ArrayAttr iteratorTypes, int64_t index) { SmallVector results; - for (auto it : llvm::enumerate(iteratorTypes)) { + for (const auto &it : llvm::enumerate(iteratorTypes)) { int64_t idx = it.index(); if (idx == index) continue; @@ -871,7 +871,7 @@ auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size()); SmallVector exprs; SmallVector iteratorTypes; - for (auto isReduceDim : llvm::enumerate(reductionMask)) { + for (const auto &isReduceDim : llvm::enumerate(reductionMask)) { if (!isReduceDim.value()) { iteratorTypes.push_back(getParallelIteratorTypeName()); exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index())); @@ -997,7 +997,7 @@ broadcast.getVectorType().getRank() - srcType.getRank(); bool innerDimBroadcast = false; SmallVector originalDims; - for (auto dim : llvm::enumerate(srcType.getShape())) { + for (const auto &dim : llvm::enumerate(srcType.getShape())) { if (dim.value() != broadcast.getVectorType().getDimSize(rankDiff + dim.index())) { innerDimBroadcast = true; diff --git a/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp b/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp --- a/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp +++ b/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp @@ -52,7 +52,7 @@ getVectorOffset(originalShape, targetShape, index); // Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'. SmallVector slicedIndices(indices.begin(), indices.end()); - for (auto dim : llvm::enumerate(permutationMap.getResults())) { + for (const auto &dim : llvm::enumerate(permutationMap.getResults())) { if (isBroadcast(dim.value())) continue; unsigned pos = dim.value().cast().getPosition(); @@ -429,7 +429,7 @@ for (unsigned i : llvm::seq(unsigned(0), affineMap.getNumResults())) map[affineMap.getDimPosition(i)] = extract.getResultType().getDimSize(i); SmallVector extractOperands; - for (auto it : llvm::enumerate(contract.getIndexingMaps())) { + for (const auto &it : llvm::enumerate(contract.getIndexingMaps())) { // For each operands calculate the new vector type after distribution. Value operand = contract->getOperand(it.index()); auto vecType = operand.getType().cast(); diff --git a/mlir/lib/IR/AffineExpr.cpp b/mlir/lib/IR/AffineExpr.cpp --- a/mlir/lib/IR/AffineExpr.cpp +++ b/mlir/lib/IR/AffineExpr.cpp @@ -1020,7 +1020,7 @@ // as lhs/rhs, and store the indices, constant coefficient corresponding to // the indices in `coefficients` map, and affine expression corresponding to // in indices in `indexToExprMap` map. 
- for (auto it : llvm::enumerate(localExprs)) {
+ for (const auto &it : llvm::enumerate(localExprs)) {
    AffineExpr expr = it.value();
    if (flatExprs[numDims + numSymbols + it.index()] == 0)
      continue;
diff --git a/mlir/lib/IR/AffineMap.cpp b/mlir/lib/IR/AffineMap.cpp
--- a/mlir/lib/IR/AffineMap.cpp
+++ b/mlir/lib/IR/AffineMap.cpp
@@ -121,7 +121,7 @@
  if (getNumDims() < getNumResults())
    return false;
  unsigned suffixStart = getNumDims() - getNumResults();
- for (auto idxAndExpr : llvm::enumerate(getResults())) {
+ for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
    unsigned resIdx = idxAndExpr.index();
    AffineExpr expr = idxAndExpr.value();
    if (auto constExpr = expr.dyn_cast()) {
@@ -168,7 +168,7 @@
      getNumResults() > getNumInputs() ? getNumResults() - getNumInputs() : 0;
  llvm::SmallBitVector dimFound(std::max(getNumInputs(), getNumResults()), false);
- for (auto idxAndExpr : llvm::enumerate(getResults())) {
+ for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
    unsigned resIdx = idxAndExpr.index();
    AffineExpr expr = idxAndExpr.value();
    // Each result may be either a constant 0 (broadcast dimension) or a
@@ -675,7 +675,7 @@
    return map;
  assert(map.getNumSymbols() == 0 && "expected map without symbols");
  SmallVector exprs(map.getNumDims());
- for (auto en : llvm::enumerate(map.getResults())) {
+ for (const auto &en : llvm::enumerate(map.getResults())) {
    auto expr = en.value();
    // Skip non-permutations.
    if (auto d = expr.dyn_cast()) {
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -1036,7 +1036,7 @@
  }
  // AffineExpr for strides.
- for (auto en : llvm::enumerate(strides)) {
+ for (const auto &en : llvm::enumerate(strides)) {
    auto dim = en.index();
    auto stride = en.value();
    assert(stride != 0 && "Invalid stride specification");
diff --git a/mlir/lib/IR/Verifier.cpp b/mlir/lib/IR/Verifier.cpp
--- a/mlir/lib/IR/Verifier.cpp
+++ b/mlir/lib/IR/Verifier.cpp
@@ -316,7 +316,7 @@
  for (Operation &op : block) {
    if (isReachable) {
      // Check that operands properly dominate this use.
- for (auto operand : llvm::enumerate(op.getOperands())) {
+ for (const auto &operand : llvm::enumerate(op.getOperands())) {
        if (domInfo.properlyDominates(operand.value(), &op))
          continue;
diff --git a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
--- a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
+++ b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
@@ -131,7 +131,7 @@
           << succInputsTypes.size();
  }
- for (auto typesIdx :
+ for (const auto &typesIdx :
       llvm::enumerate(llvm::zip(*sourceTypes, succInputsTypes))) {
    Type sourceType = std::get<0>(typesIdx.value());
    Type inputType = std::get<1>(typesIdx.value());
@@ -266,7 +266,7 @@
    return false;
  // Compute index of region.
  int64_t beginIndex = -1;
- for (auto it : llvm::enumerate(branchOp->getRegions()))
+ for (const auto &it : llvm::enumerate(branchOp->getRegions()))
    if (&it.value() == begin)
      beginIndex = it.index();
  assert(beginIndex != -1 && "could not find region in op");
diff --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
--- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp
+++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
@@ -189,7 +189,7 @@
  if (failed(componentTypeFn(context, location, operands, attributes, regions, retComponents)))
    return failure();
- for (auto shapeAndType : retComponents) {
+ for (const auto &shapeAndType : retComponents) {
    assert(shapeAndType.getAttribute() == nullptr && "attribute not supported");
    if (shapeAndType.hasRank())
      inferredReturnTypes.push_back(RankedTensorType::get(
diff --git a/mlir/lib/Reducer/ReductionTreePass.cpp b/mlir/lib/Reducer/ReductionTreePass.cpp
--- a/mlir/lib/Reducer/ReductionTreePass.cpp
+++ b/mlir/lib/Reducer/ReductionTreePass.cpp
@@ -41,7 +41,7 @@
  std::vector opsNotInRange;
  std::vector opsInRange;
  size_t keepIndex = 0;
- for (auto op : enumerate(region.getOps())) {
+ for (const auto &op : enumerate(region.getOps())) {
    int index = op.index();
    if (keepIndex < rangeToKeep.size() && index == rangeToKeep[keepIndex].second)
diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp
--- a/mlir/lib/Rewrite/ByteCode.cpp
+++ b/mlir/lib/Rewrite/ByteCode.cpp
@@ -198,9 +198,9 @@
      maxTypeRangeMemoryIndex(maxTypeRangeMemoryIndex),
      maxValueRangeMemoryIndex(maxValueRangeMemoryIndex),
      maxLoopLevel(maxLoopLevel) {
- for (auto it : llvm::enumerate(constraintFns))
+ for (const auto &it : llvm::enumerate(constraintFns))
    constraintToMemIndex.try_emplace(it.value().first(), it.index());
- for (auto it : llvm::enumerate(rewriteFns))
+ for (const auto &it : llvm::enumerate(rewriteFns))
    externalRewriterToMemIndex.try_emplace(it.value().first(), it.index());
}
@@ -631,7 +631,7 @@
    ByteCodeLiveRange &defRange = defIt.second;
    // Try to allocate to an existing index.
- for (auto existingIndexIt : llvm::enumerate(allocatedIndices)) {
+ for (const auto &existingIndexIt : llvm::enumerate(allocatedIndices)) {
      ByteCodeLiveRange &existingRange = existingIndexIt.value();
      if (!defRange.overlaps(existingRange)) {
        existingRange.unionWith(defRange);
diff --git a/mlir/lib/Rewrite/PatternApplicator.cpp b/mlir/lib/Rewrite/PatternApplicator.cpp
--- a/mlir/lib/Rewrite/PatternApplicator.cpp
+++ b/mlir/lib/Rewrite/PatternApplicator.cpp
@@ -53,7 +53,7 @@
  // Apply the cost model to the bytecode patterns first, and then the native
  // patterns.
  if (const PDLByteCode *bytecode = frozenPatternList.getPDLByteCode()) {
- for (auto it : llvm::enumerate(bytecode->getPatterns()))
+ for (const auto &it : llvm::enumerate(bytecode->getPatterns()))
      mutableByteCodeState->updatePatternBenefit(it.index(), model(it.value()));
  }
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -821,7 +821,7 @@
  currentEntryBlock = blockList[0];
  // Add function arguments to the entry block.
- for (auto kv : llvm::enumerate(f->args()))
+ for (const auto &kv : llvm::enumerate(f->args()))
    instMap[&kv.value()] =
        blockList[0]->addArgument(functionType.getParamType(kv.index()));
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -380,7 +380,7 @@
    // the case branch that was taken.
    if (switchOp.getDefaultDestination() == current)
      return switchOp.getDefaultOperands()[index];
- for (auto i : llvm::enumerate(switchOp.getCaseDestinations()))
+ for (const auto &i : llvm::enumerate(switchOp.getCaseDestinations()))
      if (i.value() == current)
        return switchOp.getCaseOperands(i.index())[index];
  }
diff --git a/mlir/lib/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
--- a/mlir/lib/Transforms/BufferResultsToOutParams.cpp
+++ b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
@@ -25,7 +25,7 @@
  // Collect information about the results will become appended arguments.
  SmallVector erasedResultTypes;
  SmallVector erasedResultIndices;
- for (auto resultType : llvm::enumerate(functionType.getResults())) {
+ for (const auto &resultType : llvm::enumerate(functionType.getResults())) {
    if (resultType.value().isa()) {
      erasedResultIndices.push_back(resultType.index());
      erasedResultTypes.push_back(resultType.value());
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -84,7 +84,7 @@
  OpBuilder bOuter(forOp);
  // Put together alloc operands for any dynamic dimensions of the memref.
  SmallVector allocOperands;
- for (auto dim : llvm::enumerate(oldMemRefType.getShape())) {
+ for (const auto &dim : llvm::enumerate(oldMemRefType.getShape())) {
    if (dim.value() == ShapedType::kDynamicSize)
      allocOperands.push_back(bOuter.createOrFold(
          forOp.getLoc(), oldMemRef, dim.index()));
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -252,7 +252,7 @@
  op->setLoc(loc);
  op->setAttrs(attrs);
  op->setOperands(operands);
- for (auto it : llvm::enumerate(successors))
+ for (const auto &it : llvm::enumerate(successors))
    op->setSuccessor(it.value(), it.index());
}
@@ -1255,7 +1255,7 @@
  remapped.reserve(llvm::size(values));
  SmallVector legalTypes;
- for (auto it : llvm::enumerate(values)) {
+ for (const auto &it : llvm::enumerate(values)) {
    Value operand = it.value();
    Type origType = operand.getType();
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -215,7 +215,7 @@
  } else {
    // Otherwise, there were multiple blocks inlined. Add arguments to the post
    // insertion block to represent the results to replace.
- for (auto resultToRepl : llvm::enumerate(resultsToReplace)) {
+ for (const auto &resultToRepl : llvm::enumerate(resultsToReplace)) {
      resultToRepl.value().replaceAllUsesWith(postInsertBlock->addArgument(
          regionResultTypes[resultToRepl.index()]));
    }
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -3355,7 +3355,7 @@
  // Add the body for the full tile loop nest.
  BlockAndValueMapping operandMap;
- for (auto loopEn : llvm::enumerate(inputNest))
+ for (const auto &loopEn : llvm::enumerate(inputNest))
    operandMap.map(loopEn.value().getInductionVar(),
                   fullTileLoops[loopEn.index()].getInductionVar());
  b = OpBuilder::atBlockTerminator(fullTileLoops.back().getBody());
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -589,7 +589,7 @@
      1 + blocksToMerge.size(), SmallVector(operandsToMerge.size()));
  unsigned curOpIndex = 0;
- for (auto it : llvm::enumerate(operandsToMerge)) {
+ for (const auto &it : llvm::enumerate(operandsToMerge)) {
    unsigned nextOpOffset = it.value().first - curOpIndex;
    curOpIndex = it.value().first;
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -182,7 +182,7 @@
  if (failed(shapedOp.reifyReturnTypeShapes(b, op->getOperands(), shapes)) ||
      !llvm::hasSingleElement(shapes))
    return;
- for (auto it : llvm::enumerate(shapes)) {
+ for (const auto &it : llvm::enumerate(shapes)) {
    op->emitRemark() << "value " << it.index() << ": " << it.value().getDefiningOp();
  }
diff --git a/mlir/test/lib/IR/TestPrintDefUse.cpp b/mlir/test/lib/IR/TestPrintDefUse.cpp
--- a/mlir/test/lib/IR/TestPrintDefUse.cpp
+++ b/mlir/test/lib/IR/TestPrintDefUse.cpp
@@ -41,7 +41,7 @@
  // Print information about the user of each of the result.
llvm::outs() << "Has " << op->getNumResults() << " results:\n"; - for (auto indexedResult : llvm::enumerate(op->getResults())) { + for (const auto &indexedResult : llvm::enumerate(op->getResults())) { Value result = indexedResult.value(); llvm::outs() << " - Result " << indexedResult.index(); if (result.use_empty()) { diff --git a/mlir/test/lib/IR/TestSlicing.cpp b/mlir/test/lib/IR/TestSlicing.cpp --- a/mlir/test/lib/IR/TestSlicing.cpp +++ b/mlir/test/lib/IR/TestSlicing.cpp @@ -33,7 +33,7 @@ builder.create(loc, clonedFuncOpName, parentFuncOp.getType()); BlockAndValueMapping mapper; builder.setInsertionPointToEnd(clonedFuncOp.addEntryBlock()); - for (auto arg : enumerate(parentFuncOp.getArguments())) + for (const auto &arg : enumerate(parentFuncOp.getArguments())) mapper.map(arg.value(), clonedFuncOp.getArgument(arg.index())); SetVector slice; getBackwardSlice(op, &slice); diff --git a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp --- a/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp +++ b/mlir/tools/mlir-linalg-ods-gen/mlir-linalg-ods-yaml-gen.cpp @@ -361,7 +361,7 @@ static Optional findTensorDefArgIndex(StringRef name, SmallVectorImpl &args) { - for (auto it : llvm::enumerate(args)) { + for (const auto &it : llvm::enumerate(args)) { if (it.value().name == name) return it.index(); } @@ -382,7 +382,7 @@ return std::string("helper.getFloat64Type()"); // Search all argument types. - for (auto it : llvm::enumerate(args)) { + for (const auto &it : llvm::enumerate(args)) { if (it.value().typeVar == typeVar) return llvm::formatv("block.getArgument({0}).getType()", it.index()) .str(); diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp --- a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp @@ -561,7 +561,7 @@ MethodParameter("const KeyTy &", "tblgenKey")); if (!def.hasStorageCustomConstructor()) { auto &body = construct->body().indent(); - for (auto it : llvm::enumerate(params)) { + for (const auto &it : llvm::enumerate(params)) { body << formatv("auto {0} = std::get<{1}>(tblgenKey);\n", it.value().getName(), it.index()); } diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp --- a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp @@ -660,7 +660,7 @@ /// Collect all of the attribute's or type's parameters. SmallVector> vars; /// Ensure that none of the parameters have already been captured. 
- for (auto it : llvm::enumerate(def.getParameters())) {
+ for (const auto &it : llvm::enumerate(def.getParameters())) {
    if (seenParams.test(it.index())) {
      return emitError("`params` captures duplicate parameter: " + it.value().getName());
diff --git a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
--- a/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRIntrinsicGen.cpp
@@ -65,7 +65,7 @@
                             const char *listName) {
  auto results = record.getValueAsListOfDefs(listName);
  IndicesTy overloadedOps(results.size());
- for (auto r : llvm::enumerate(results)) {
+ for (const auto &r : llvm::enumerate(results)) {
    llvm::MVT::SimpleValueType vt = getValueType(r.value());
    switch (vt) {
    case llvm::MVT::iAny:
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -2265,7 +2265,7 @@
  body << " {\n unsigned index = 0; (void)index;\n";
- for (auto staticValue : llvm::enumerate(values)) {
+ for (const auto &staticValue : llvm::enumerate(values)) {
    const NamedTypeConstraint &value = staticValue.value();
    bool hasPredicate = value.hasPredicate();
@@ -2330,7 +2330,7 @@
    return;
  body << " {\n unsigned index = 0; (void)index;\n";
- for (auto it : llvm::enumerate(regions)) {
+ for (const auto &it : llvm::enumerate(regions)) {
    const auto &region = it.value();
    if (canSkip(region))
      continue;
diff --git a/mlir/tools/mlir-tblgen/OpDocGen.cpp b/mlir/tools/mlir-tblgen/OpDocGen.cpp
--- a/mlir/tools/mlir-tblgen/OpDocGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDocGen.cpp
@@ -235,7 +235,7 @@
  os << "\nSyntax:\n\n```\n!" << def.getDialect().getName() << "." << def.getMnemonic() << "<\n";
- for (auto it : llvm::enumerate(parameters)) {
+ for (const auto &it : llvm::enumerate(parameters)) {
    const AttrOrTypeParameter &param = it.value();
    os << " " << param.getSyntax();
    if (it.index() < (parameters.size() - 1))
diff --git a/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp b/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp
--- a/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpPythonBindingGen.cpp
@@ -916,7 +916,7 @@
/// Emits named accessors to regions.
static void emitRegionAccessors(const Operator &op, raw_ostream &os) {
- for (auto en : llvm::enumerate(op.getRegions())) {
+ for (const auto &en : llvm::enumerate(op.getRegions())) {
    const NamedRegion &region = en.value();
    if (region.name.empty())
      continue;
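For reference, every hunk above applies the same mechanical change: `llvm::enumerate` yields an index/value wrapper per iteration, and binding that wrapper with `const auto &` instead of `auto` avoids copying it on every step and keeps range-loop-analysis / performance-for-range-copy diagnostics quiet. The standalone sketch below illustrates the pattern; it is not part of the patch, and the includes and the small `main` harness are illustrative assumptions only.

// Illustrative sketch only -- not code from this change.
#include "llvm/ADT/STLExtras.h"       // llvm::enumerate
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallVector<int, 4> values = {10, 20, 30};

  // Before: `for (auto en : llvm::enumerate(values))` copies the returned
  // index/value wrapper on each iteration.
  // After: binding by const reference avoids that copy, matching the patch.
  for (const auto &en : llvm::enumerate(values))
    llvm::outs() << "index " << en.index() << " -> value " << en.value() << "\n";
  return 0;
}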