From e4853be2f130c3e27f6c84fc4ad1d66d8b9a3810 Mon Sep 17 00:00:00 2001
From: Mehdi Amini
Date: Sun, 2 Jan 2022 22:02:14 +0000
Subject: [PATCH] Apply clang-tidy fixes for performance-for-range-copy to MLIR (NFC)
---
 mlir/lib/Analysis/AffineStructures.cpp | 2 +-
 mlir/lib/Analysis/LoopAnalysis.cpp | 3 ++-
 mlir/lib/Analysis/NumberOfExecutions.cpp | 2 +-
 mlir/lib/Analysis/SliceAnalysis.cpp | 2 +-
 mlir/lib/Bindings/Python/IRCore.cpp | 8 +++----
 .../Conversion/GPUCommon/GPUOpsLowering.cpp | 6 ++---
 .../GPUCommon/GPUToLLVMConversion.cpp | 2 +-
 mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp | 2 +-
 .../ConvertLaunchFuncToVulkanCalls.cpp | 2 +-
 mlir/lib/Conversion/LLVMCommon/Pattern.cpp | 4 ++--
 .../Conversion/LLVMCommon/VectorPattern.cpp | 2 +-
 .../Conversion/MemRefToLLVM/MemRefToLLVM.cpp | 3 ++-
 .../PDLToPDLInterp/PDLToPDLInterp.cpp | 4 ++--
 .../PDLToPDLInterp/PredicateTree.cpp | 4 ++--
 mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp | 2 +-
 mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp | 2 +-
 .../ConvertLaunchFuncToLLVMCalls.cpp | 2 +-
 .../StandardToLLVM/StandardToLLVM.cpp | 2 +-
 .../Conversion/TosaToLinalg/TosaToLinalg.cpp | 4 ++--
 .../Conversion/VectorToGPU/VectorToGPU.cpp | 4 ++--
 .../VectorToLLVM/ConvertVectorToLLVM.cpp | 5 +++--
 mlir/lib/Dialect/Affine/IR/AffineOps.cpp | 2 +-
 mlir/lib/Dialect/GPU/IR/GPUDialect.cpp | 2 +-
 .../GPU/Transforms/KernelOutlining.cpp | 4 ++--
 .../GPU/Transforms/MemoryPromotion.cpp | 2 +-
 mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 2 +-
 .../Linalg/Analysis/DependenceAnalysis.cpp | 2 +-
 mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp | 10 ++++-----
 .../Dialect/Linalg/Transforms/Bufferize.cpp | 2 +-
 .../Linalg/Transforms/DropUnitDims.cpp | 8 +++----
 .../Linalg/Transforms/ElementwiseOpFusion.cpp | 16 +++++++-------
 mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp | 8 +++----
 .../Linalg/Transforms/FusionOnTensors.cpp | 4 ++--
 .../Dialect/Linalg/Transforms/Hoisting.cpp | 2 +-
 mlir/lib/Dialect/Linalg/Transforms/Loops.cpp | 2 +-
 .../Dialect/Linalg/Transforms/Promotion.cpp | 4 ++--
 mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp | 2 +-
 .../Dialect/Linalg/Transforms/Transforms.cpp | 8 +++----
 .../Linalg/Transforms/Vectorization.cpp | 2 +-
 mlir/lib/Dialect/Linalg/Utils/Utils.cpp | 12 +++++-----
 mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp | 10 ++++-----
 mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 2 +-
 mlir/lib/Dialect/PDL/IR/PDL.cpp | 2 +-
 mlir/lib/Dialect/SCF/SCF.cpp | 11 +++++-----
 .../lib/Dialect/SCF/Transforms/ForToWhile.cpp | 4 ++--
 .../Dialect/SCF/Transforms/LoopPipelining.cpp | 6 ++---
 .../SCF/Transforms/ParallelLoopTiling.cpp | 4 ++--
 .../Transforms/LowerABIAttributesPass.cpp | 2 +-
 .../SPIRV/Transforms/SPIRVConversion.cpp | 6 ++---
 mlir/lib/Dialect/Shape/IR/Shape.cpp | 2 +-
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp | 4 ++--
 .../VectorMultiDimReductionTransforms.cpp | 2 +-
 mlir/lib/Dialect/Vector/VectorOps.cpp | 22 +++++++++----------
 ...rTransferPermutationMapRewritePatterns.cpp | 2 +-
 mlir/lib/Dialect/Vector/VectorTransforms.cpp | 6 ++---
 .../Dialect/Vector/VectorUnrollDistribute.cpp | 4 ++--
 mlir/lib/IR/AffineExpr.cpp | 2 +-
 mlir/lib/IR/AffineMap.cpp | 6 ++---
 mlir/lib/IR/BuiltinTypes.cpp | 2 +-
 mlir/lib/IR/Verifier.cpp | 2 +-
 mlir/lib/Interfaces/ControlFlowInterfaces.cpp | 4 ++--
 mlir/lib/Interfaces/InferTypeOpInterface.cpp | 2 +-
 mlir/lib/Reducer/ReductionTreePass.cpp | 2 +-
 mlir/lib/Rewrite/ByteCode.cpp | 6 ++---
 mlir/lib/Rewrite/PatternApplicator.cpp | 2 +-
 mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp | 2 +-
 mlir/lib/Target/LLVMIR/ModuleTranslation.cpp | 2 +-
.../Transforms/BufferResultsToOutParams.cpp | 2 +- mlir/lib/Transforms/PipelineDataTransfer.cpp | 2 +- .../Transforms/Utils/DialectConversion.cpp | 4 ++-- mlir/lib/Transforms/Utils/InliningUtils.cpp | 2 +- mlir/lib/Transforms/Utils/LoopUtils.cpp | 2 +- mlir/lib/Transforms/Utils/RegionUtils.cpp | 2 +- 73 files changed, 150 insertions(+), 146 deletions(-) diff --git a/mlir/lib/Analysis/AffineStructures.cpp b/mlir/lib/Analysis/AffineStructures.cpp index 205abe280d9b..3742a2428148 100644 --- a/mlir/lib/Analysis/AffineStructures.cpp +++ b/mlir/lib/Analysis/AffineStructures.cpp @@ -3346,7 +3346,7 @@ AffineMap mlir::alignAffineMapWithValues(AffineMap map, ValueRange operands, newSyms->append(syms.begin(), syms.end()); } - for (auto operand : llvm::enumerate(operands)) { + for (const auto &operand : llvm::enumerate(operands)) { // Compute replacement dim/sym of operand. AffineExpr replacement; auto dimIt = std::find(dims.begin(), dims.end(), operand.value()); diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp index 0672f25671ec..914bc1604d39 100644 --- a/mlir/lib/Analysis/LoopAnalysis.cpp +++ b/mlir/lib/Analysis/LoopAnalysis.cpp @@ -353,7 +353,8 @@ bool mlir::isOpwiseShiftValid(AffineForOp forOp, ArrayRef shifts) { // Work backwards over the body of the block so that the shift of a use's // ancestor operation in the block gets recorded before it's looked up. DenseMap forBodyShift; - for (auto it : llvm::enumerate(llvm::reverse(forBody->getOperations()))) { + for (const auto &it : + llvm::enumerate(llvm::reverse(forBody->getOperations()))) { auto &op = it.value(); // Get the index of the current operation, note that we are iterating in diff --git a/mlir/lib/Analysis/NumberOfExecutions.cpp b/mlir/lib/Analysis/NumberOfExecutions.cpp index ad30058d3d73..ad90cee92ee8 100644 --- a/mlir/lib/Analysis/NumberOfExecutions.cpp +++ b/mlir/lib/Analysis/NumberOfExecutions.cpp @@ -52,7 +52,7 @@ static void computeRegionBlockNumberOfExecutions( // Query RegionBranchOpInterface interface if it is available. if (auto regionInterface = dyn_cast(parentOp)) { SmallVector operands(parentOp->getNumOperands()); - for (auto operandIt : llvm::enumerate(parentOp->getOperands())) + for (const auto &operandIt : llvm::enumerate(parentOp->getOperands())) matchPattern(operandIt.value(), m_Constant(&operands[operandIt.index()])); regionInterface.getNumRegionInvocations(operands, numRegionsInvocations); diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp index b45ee4c0faae..fa78a804175d 100644 --- a/mlir/lib/Analysis/SliceAnalysis.cpp +++ b/mlir/lib/Analysis/SliceAnalysis.cpp @@ -86,7 +86,7 @@ static void getBackwardSliceImpl(Operation *op, if (filter && !filter(op)) return; - for (auto en : llvm::enumerate(op->getOperands())) { + for (const auto &en : llvm::enumerate(op->getOperands())) { auto operand = en.value(); if (auto *definingOp = operand.getDefiningOp()) { if (backwardSlice->count(definingOp) == 0) diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp index ccdd159fd438..be2abcdd501f 100644 --- a/mlir/lib/Bindings/Python/IRCore.cpp +++ b/mlir/lib/Bindings/Python/IRCore.cpp @@ -1155,7 +1155,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList, resultTypes.reserve(resultTypeList.size()); if (resultSegmentSpecObj.is_none()) { // Non-variadic result unpacking. 
- for (auto it : llvm::enumerate(resultTypeList)) { + for (const auto &it : llvm::enumerate(resultTypeList)) { try { resultTypes.push_back(py::cast(it.value())); if (!resultTypes.back()) @@ -1179,7 +1179,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList, .str()); } resultSegmentLengths.reserve(resultTypeList.size()); - for (auto it : + for (const auto &it : llvm::enumerate(llvm::zip(resultTypeList, resultSegmentSpec))) { int segmentSpec = std::get<1>(it.value()); if (segmentSpec == 1 || segmentSpec == 0) { @@ -1240,7 +1240,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList, operands.reserve(operands.size()); if (operandSegmentSpecObj.is_none()) { // Non-sized operand unpacking. - for (auto it : llvm::enumerate(operandList)) { + for (const auto &it : llvm::enumerate(operandList)) { try { operands.push_back(py::cast(it.value())); if (!operands.back()) @@ -1264,7 +1264,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList, .str()); } operandSegmentLengths.reserve(operandList.size()); - for (auto it : + for (const auto &it : llvm::enumerate(llvm::zip(operandList, operandSegmentSpec))) { int segmentSpec = std::get<1>(it.value()); if (segmentSpec == 1 || segmentSpec == 0) { diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp index 8c269ee8a4df..828f0ef15120 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp @@ -21,7 +21,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor, SmallVector workgroupBuffers; workgroupBuffers.reserve(gpuFuncOp.getNumWorkgroupAttributions()); - for (auto en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) { + for (const auto &en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) { Value attribution = en.value(); auto type = attribution.getType().dyn_cast(); @@ -88,7 +88,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor, if (!workgroupBuffers.empty()) zero = rewriter.create(loc, i32Type, rewriter.getI32IntegerAttr(0)); - for (auto en : llvm::enumerate(workgroupBuffers)) { + for (const auto &en : llvm::enumerate(workgroupBuffers)) { LLVM::GlobalOp global = en.value(); Value address = rewriter.create(loc, global); auto elementType = @@ -111,7 +111,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor, // Rewrite private memory attributions to alloca'ed buffers. 
unsigned numWorkgroupAttributions = gpuFuncOp.getNumWorkgroupAttributions(); auto int64Ty = IntegerType::get(rewriter.getContext(), 64); - for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) { + for (const auto &en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) { Value attribution = en.value(); auto type = attribution.getType().cast(); assert(type && type.hasStaticShape() && "unexpected type in attribution"); diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp index 757f3828bdc7..f7f8b6b14235 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp @@ -634,7 +634,7 @@ Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray( arraySize, /*alignment=*/0); auto zero = builder.create(loc, llvmInt32Type, builder.getI32IntegerAttr(0)); - for (auto en : llvm::enumerate(arguments)) { + for (const auto &en : llvm::enumerate(arguments)) { auto index = builder.create( loc, llvmInt32Type, builder.getI32IntegerAttr(en.index())); auto fieldPtr = builder.create( diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp index 7405f6f91a4f..96dd32aaa99d 100644 --- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp +++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp @@ -206,7 +206,7 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, TypeConverter &typeConverter, // LowerABIAttributesPass. TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs()); { - for (auto argType : enumerate(funcOp.getType().getInputs())) { + for (const auto &argType : enumerate(funcOp.getType().getInputs())) { auto convertedType = typeConverter.convertType(argType.value()); signatureConverter.addInputs(argType.index(), convertedType); } diff --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp index b8d1a8556395..e7e64aece05d 100644 --- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp +++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp @@ -222,7 +222,7 @@ void VulkanLaunchFuncToVulkanCallsPass::createBindMemRefCalls( Value descriptorSet = builder.create( loc, getInt32Type(), builder.getI32IntegerAttr(0)); - for (auto en : + for (const auto &en : llvm::enumerate(cInterfaceVulkanLaunchCallOp.getOperands().drop_front( kVulkanLaunchNumConfigOperands))) { // Create LLVM constant for the descriptor binding index. diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp index 47dabc90bce5..0003bd859e47 100644 --- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp +++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp @@ -213,11 +213,11 @@ MemRefDescriptor ConvertToLLVMPattern::createMemRefDescriptor( createIndexConstant(rewriter, loc, 0)); // Fields 4: Sizes. - for (auto en : llvm::enumerate(sizes)) + for (const auto &en : llvm::enumerate(sizes)) memRefDescriptor.setSize(rewriter, loc, en.index(), en.value()); // Field 5: Strides. 
- for (auto en : llvm::enumerate(strides)) + for (const auto &en : llvm::enumerate(strides)) memRefDescriptor.setStride(rewriter, loc, en.index(), en.value()); return memRefDescriptor; diff --git a/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp b/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp index ace5bec09f4e..54c5b93877ff 100644 --- a/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp +++ b/mlir/lib/Conversion/LLVMCommon/VectorPattern.cpp @@ -101,7 +101,7 @@ LogicalResult LLVM::detail::handleMultidimensionalVectors( // For this unrolled `position` corresponding to the `linearIndex`^th // element, extract operand vectors SmallVector extractedOperands; - for (auto operand : llvm::enumerate(operands)) { + for (const auto &operand : llvm::enumerate(operands)) { extractedOperands.push_back(rewriter.create( loc, operand1DVectorTypes[operand.index()], operand.value(), position)); diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp index b1f7d0452ee1..9142be183174 100644 --- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -1420,7 +1420,8 @@ public: targetMemRef.setOffset(rewriter, loc, viewMemRef.offset(rewriter, loc)); // Iterate over the dimensions and apply size/stride permutation. - for (auto en : llvm::enumerate(transposeOp.permutation().getResults())) { + for (const auto &en : + llvm::enumerate(transposeOp.permutation().getResults())) { int sourcePos = en.index(); int targetPos = en.value().cast().getPosition(); targetMemRef.setSize(rewriter, loc, targetPos, diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp index 7db7dc03dc80..367bbb55ee1b 100644 --- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp @@ -736,7 +736,7 @@ void PatternLowering::generateRewriter( bool seenVariableLength = false; Type valueTy = builder.getType(); Type valueRangeTy = pdl::RangeType::get(valueTy); - for (auto it : llvm::enumerate(resultTys)) { + for (const auto &it : llvm::enumerate(resultTys)) { Value &type = rewriteValues[it.value()]; if (type) continue; @@ -862,7 +862,7 @@ void PatternLowering::generateOperationResultTypeRewriter( // Otherwise, handle inference for each of the result types individually. OperandRange resultTypeValues = op.types(); types.reserve(resultTypeValues.size()); - for (auto it : llvm::enumerate(resultTypeValues)) { + for (const auto &it : llvm::enumerate(resultTypeValues)) { Value resultType = it.value(); // Check for an already translated value. diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp index 517f28c2044f..c325bfb42456 100644 --- a/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PredicateTree.cpp @@ -162,7 +162,7 @@ static void getTreePredicates(std::vector &predList, builder.getAllOperands(opPos)); } else { bool foundVariableLength = false; - for (auto operandIt : llvm::enumerate(operands)) { + for (const auto &operandIt : llvm::enumerate(operands)) { bool isVariadic = operandIt.value().getType().isa(); foundVariableLength |= isVariadic; @@ -460,7 +460,7 @@ static void buildCostGraph(ArrayRef roots, RootOrderingGraph &graph, } // Default case: visit all the operands. 
- for (auto p : llvm::enumerate(operationOp.operands())) + for (const auto &p : llvm::enumerate(operationOp.operands())) toVisit.emplace(p.value(), entry.value, p.index(), entry.depth + 1); }) diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp index f3547e580501..d2faff9d3238 100644 --- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp +++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp @@ -261,7 +261,7 @@ void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp, builder.setInsertionPointToStart(&launchOp.body().front()); auto *lbArgumentIt = lbs.begin(); auto *stepArgumentIt = steps.begin(); - for (auto en : llvm::enumerate(ivs)) { + for (const auto &en : llvm::enumerate(ivs)) { Value id = en.index() < numBlockDims ? getDim3Value(launchOp.getBlockIds(), en.index()) diff --git a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp index fd6ec8208620..6bb3da666ce7 100644 --- a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp +++ b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp @@ -387,7 +387,7 @@ WhileOpConversion::matchAndRewrite(scf::WhileOp whileOp, OpAdaptor adaptor, // the before region, which may not matching the whole op's result. Instead, // the scf.condition op returns values matching the whole op's results. So we // need to create/load/store variables according to that. - for (auto it : llvm::enumerate(condArgs)) { + for (const auto &it : llvm::enumerate(condArgs)) { auto res = it.value(); auto i = it.index(); auto pointerType = diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp index bf60f4b6a211..9f1f93f9abf7 100644 --- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp +++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp @@ -208,7 +208,7 @@ class GPULaunchLowering : public ConvertOpToLLVMPattern { SmallVector copyInfo; auto numKernelOperands = launchOp.getNumKernelOperands(); auto kernelOperands = adaptor.getOperands().take_back(numKernelOperands); - for (auto operand : llvm::enumerate(kernelOperands)) { + for (const auto &operand : llvm::enumerate(kernelOperands)) { // Check if the kernel's operand is a ranked memref. 
auto memRefType = launchOp.getKernelOperand(operand.index()) .getType() diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp index feaa140cc710..88c7f43b8dc5 100644 --- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp +++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp @@ -254,7 +254,7 @@ protected: rewriter.getNamedAttr(function_like_impl::getArgDictAttrName(), rewriter.getArrayAttr(newArgAttrs))); } - for (auto pair : llvm::enumerate(attributes)) { + for (const auto &pair : llvm::enumerate(attributes)) { if (pair.value().getName() == "llvm.linkage") { attributes.erase(attributes.begin() + pair.index()); break; diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp index 38c8276f2843..f28527d185c1 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -694,7 +694,7 @@ elementwiseMatchAndRewriteHelper(Operation *operation, SmallVector newShape; SmallVector affineExprs; newShape.reserve(type.getRank()); - for (auto it : llvm::enumerate(type.getShape())) { + for (const auto &it : llvm::enumerate(type.getShape())) { if (it.value() == resultTy.getDimSize(it.index())) { newShape.push_back(it.value()); affineExprs.push_back( @@ -1175,7 +1175,7 @@ public: SmallVector inputExprs; inputExprs.resize(resultTy.getRank()); auto operandTy = input.getType().cast(); - for (auto permutation : llvm::enumerate(perms.getValues())) { + for (const auto &permutation : llvm::enumerate(perms.getValues())) { auto index = permutation.index(); auto value = permutation.value().getZExtValue(); if (!operandTy.hasRank() || operandTy.isDynamicDim(index)) { diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp index 30bad881a319..725264d31fc7 100644 --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -449,7 +449,7 @@ static void convertForOp(scf::ForOp op, llvm::DenseMap &valueMapping) { SmallVector newOperands; SmallVector> argMapping; - for (auto operand : llvm::enumerate(op.getIterOperands())) { + for (const auto &operand : llvm::enumerate(op.getIterOperands())) { auto it = valueMapping.find(operand.value()); if (it == valueMapping.end()) continue; @@ -474,7 +474,7 @@ static void convertYieldOp(scf::YieldOp op, OpBuilder b(op); auto loop = cast(op->getParentOp()); auto yieldOperands = llvm::to_vector<4>(op.getOperands()); - for (auto operand : llvm::enumerate(op.getOperands())) { + for (const auto &operand : llvm::enumerate(op.getOperands())) { auto it = valueMapping.find(operand.value()); if (it == valueMapping.end()) continue; diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index 062a54432cea..0a938430a5b9 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -497,7 +497,7 @@ public: eltType = llvmType.cast().getElementType(); Value insert = rewriter.create(loc, llvmType); int64_t insPos = 0; - for (auto en : llvm::enumerate(maskArrayAttr)) { + for (const auto &en : llvm::enumerate(maskArrayAttr)) { int64_t extPos = en.value().cast().getInt(); Value value = adaptor.v1(); if (extPos >= v1Dim) { @@ -883,7 +883,8 @@ public: desc.setOffset(rewriter, loc, zero); // Fill size and stride descriptors in memref. 
- for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) { + for (const auto &indexedSize : + llvm::enumerate(targetMemRefType.getShape())) { int64_t index = indexedSize.index(); auto sizeAttr = rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value()); diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp index c3c1b5129480..c89588e0b87b 100644 --- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp +++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp @@ -680,7 +680,7 @@ static void composeAffineMapAndOperands(AffineMap *map, for (auto *container : {&dims, &syms}) { bool isDim = (container == &dims); auto &repls = isDim ? dimReplacements : symReplacements; - for (auto en : llvm::enumerate(*container)) { + for (const auto &en : llvm::enumerate(*container)) { Value v = en.value(); if (!v) { assert(isDim ? !map->isFunctionOfDim(en.index()) diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp index 579c385d653c..ea8ce177848e 100644 --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -940,7 +940,7 @@ static LogicalResult verify(gpu::ReturnOp returnOp) { .attachNote(function.getLoc()) .append("return type declared here"); - for (auto pair : llvm::enumerate( + for (const auto &pair : llvm::enumerate( llvm::zip(function.getType().getResults(), returnOp.operands()))) { Type type; Value operand; diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp index ac5272348438..a2e64d9c92a1 100644 --- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp @@ -54,7 +54,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody, createForAllDimensions(builder, loc, indexOps); // Replace the leading 12 function args with the respective thread/block index // operations. Iterate backwards since args are erased and indices change. - for (auto indexOp : enumerate(indexOps)) + for (const auto &indexOp : enumerate(indexOps)) map.map(firstBlock.getArgument(indexOp.index()), indexOp.value()); } @@ -174,7 +174,7 @@ static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp, // Map arguments from gpu.launch region to the arguments of the gpu.func // operation. Block &entryBlock = outlinedFuncBody.front(); - for (auto operand : enumerate(operands)) + for (const auto &operand : enumerate(operands)) map.map(operand.value(), entryBlock.getArgument(operand.index())); // Clone the region of the gpu.launch operation into the gpu.func operation. diff --git a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp index 55098a9c5b46..f7c5ca8d5a77 100644 --- a/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/MemoryPromotion.cpp @@ -89,7 +89,7 @@ static void insertCopyLoops(ImplicitLocOpBuilder &b, Value from, Value to) { }); // Map the innermost loops to threads in reverse order. 
- for (auto en : + for (const auto &en : llvm::enumerate(llvm::reverse(llvm::makeArrayRef(ivs).take_back( GPUDialect::getNumWorkgroupDimensions())))) { Value v = en.value(); diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp index 66157371f30c..77f436f109ca 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -1485,7 +1485,7 @@ static void printGlobalOp(OpAsmPrinter &p, GlobalOp op) { // list is parsed, returns -1. static int parseOptionalKeywordAlternative(OpAsmParser &parser, ArrayRef keywords) { - for (auto en : llvm::enumerate(keywords)) { + for (const auto &en : llvm::enumerate(keywords)) { if (succeeded(parser.parseOptionalKeyword(en.value()))) return en.index(); } diff --git a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp index 70117e533b19..ee5c6da544b0 100644 --- a/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp +++ b/mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp @@ -103,7 +103,7 @@ LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, FuncOp f) { LinalgDependenceGraph::LinalgDependenceGraph(Aliases &aliases, ArrayRef ops) : aliases(aliases), linalgOps(ops.begin(), ops.end()) { - for (auto en : llvm::enumerate(linalgOps)) { + for (const auto &en : llvm::enumerate(linalgOps)) { linalgOpPositions.insert( std::make_pair(en.value().getOperation(), en.index())); } diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp index fe3363d7d0de..083d8b75463a 100644 --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -1093,7 +1093,7 @@ static LogicalResult verify(PadTensorOp op) { return op.emitError("expected the block to have ") << rank << " arguments"; // Note: the number and type of yield values are checked in the YieldOp. - for (auto en : llvm::enumerate(block.getArgumentTypes())) { + for (const auto &en : llvm::enumerate(block.getArgumentTypes())) { if (!en.value().isIndex()) return op.emitOpError("expected block argument ") << (en.index() + 1) << " to be an index"; @@ -1204,7 +1204,7 @@ PadTensorOp PadTensorOp::createPadHighOp(Type type, Value source, Value pad, SmallVector low, high; auto rankedTensorType = type.cast(); assert(rankedTensorType.hasStaticShape()); - for (auto en : enumerate(rankedTensorType.getShape())) { + for (const auto &en : enumerate(rankedTensorType.getShape())) { AffineExpr d0; bindDims(b.getContext(), d0); auto dimOp = b.createOrFold(loc, source, en.index()); @@ -1275,7 +1275,7 @@ SmallVector PadTensorOp::getIterationDomain(OpBuilder &b) { // Initialize all the ranges to {zero, one, one}. All the `ub`s are // overwritten. SmallVector loopRanges(reifiedShapes[0].size(), {zero, one, one}); - for (auto ub : enumerate(reifiedShapes[0])) + for (const auto &ub : enumerate(reifiedShapes[0])) loopRanges[ub.index()].size = ub.value(); return loopRanges; } @@ -2001,7 +2001,7 @@ struct TiledLoopInputsFolder : public OpRewritePattern { // Store ids of the corresponding old and new input operands. 
SmallVector oldInputIdToNew(tiledLoop.inputs().size(), kNoMatch); - for (auto en : llvm::enumerate( + for (const auto &en : llvm::enumerate( llvm::zip(tiledLoop.inputs(), tiledLoop.getRegionInputArgs()))) { Value in, bbArg; size_t index = en.index(); @@ -2215,7 +2215,7 @@ struct TiledLoopResultsFolder : public OpRewritePattern { SmallVector oldResultIdToNew(tiledLoop.getNumResults(), kNoMatch); SmallVector resultReplacement(tiledLoop.getNumResults()); - for (auto en : llvm::enumerate( + for (const auto &en : llvm::enumerate( llvm::zip(tiledLoop.outputs(), tiledLoop.getRegionOutputArgs()))) { size_t index = en.index(); Value out = std::get<0>(en.value()); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp index 32d536384331..da01ec496bec 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp @@ -43,7 +43,7 @@ allocateBuffersForResults(Location loc, LinalgOp linalgOp, ValueRange outputs, // Allocate a buffer for every tensor result. assert(linalgOp.getNumOutputs() == linalgOp->getNumResults()); - for (auto en : llvm::enumerate(linalgOp->getResultTypes())) { + for (const auto &en : llvm::enumerate(linalgOp->getResultTypes())) { size_t resultIndex = en.index(); Type resultType = en.value(); diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp index 521fdd9d2e89..eaf95a3751a8 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp @@ -186,7 +186,7 @@ struct FoldUnitDimLoops : public OpRewritePattern { DenseSet unitDims; SmallVector unitDimsReductionLoops; ArrayAttr iteratorTypes = genericOp.iterator_types(); - for (auto expr : enumerate(invertedMap.getResults())) { + for (const auto &expr : enumerate(invertedMap.getResults())) { if (AffineDimExpr dimExpr = expr.value().dyn_cast()) if (dims[dimExpr.getPosition()] == 1) unitDims.insert(expr.index()); @@ -205,7 +205,7 @@ struct FoldUnitDimLoops : public OpRewritePattern { // Compute the iterator types of the modified op by dropping the one-trip // count loops. SmallVector newIteratorTypes; - for (auto attr : llvm::enumerate(iteratorTypes)) { + for (const auto &attr : llvm::enumerate(iteratorTypes)) { if (!unitDims.count(attr.index())) newIteratorTypes.push_back(attr.value()); } @@ -439,7 +439,7 @@ struct ReplaceUnitExtents : public OpRewritePattern { // If any result tensor has a modified shape, then add reshape to recover // the original shape. 
SmallVector resultReplacements; - for (auto result : llvm::enumerate(replacementOp.getResults())) { + for (const auto &result : llvm::enumerate(replacementOp.getResults())) { unsigned index = result.index() + replacementOp.getNumInputs(); auto origResultType = genericOp.getResult(result.index()).getType(); @@ -465,7 +465,7 @@ static Optional> getReassociationMapForFoldingUnitDims(ArrayRef mixedSizes) { SmallVector reassociation; ReassociationIndices curr; - for (auto it : llvm::enumerate(mixedSizes)) { + for (const auto &it : llvm::enumerate(mixedSizes)) { auto dim = it.index(); auto size = it.value(); curr.push_back(dim); diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp index 795a23d7b1d8..6fd3927c80ca 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @@ -565,7 +565,7 @@ LogicalResult ExpansionInfo::compute(LinalgOp linalgOp, // dimension of the original op. SmallVector numExpandedDims(fusedIndexMap.getNumDims(), 1); expandedShapeMap.resize(fusedIndexMap.getNumDims()); - for (auto resultExpr : llvm::enumerate(fusedIndexMap.getResults())) { + for (const auto &resultExpr : llvm::enumerate(fusedIndexMap.getResults())) { unsigned pos = resultExpr.value().cast().getPosition(); AffineMap foldedDims = reassociationMaps[resultExpr.index()]; numExpandedDims[pos] = foldedDims.getNumResults(); @@ -581,7 +581,7 @@ LogicalResult ExpansionInfo::compute(LinalgOp linalgOp, // Compute reassociation map from the original op to the expanded op. unsigned sum = 0; reassociation.reserve(fusedIndexMap.getNumDims()); - for (auto numFoldedDim : llvm::enumerate(numExpandedDims)) { + for (const auto &numFoldedDim : llvm::enumerate(numExpandedDims)) { auto seq = llvm::seq(sum, sum + numFoldedDim.value()); reassociation.emplace_back(seq.begin(), seq.end()); sum += numFoldedDim.value(); @@ -861,7 +861,7 @@ struct FoldProducerReshapeOpByLinearization if (!genericOp.hasTensorSemantics()) return failure(); SmallVector inputOperands = genericOp.getInputOperands(); - for (auto en : llvm::enumerate(inputOperands)) { + for (const auto &en : llvm::enumerate(inputOperands)) { auto reshapeOp = en.value()->get().getDefiningOp(); if (!reshapeOp) continue; @@ -976,7 +976,7 @@ struct PushExpandingReshape : public OpRewritePattern { // 1. Look for tensor_expand_shape operands and figure out save the // dimensions merged. SmallVector inputOperands = genericOp.getInputOperands(); - for (auto en : llvm::enumerate(inputOperands)) { + for (const auto &en : llvm::enumerate(inputOperands)) { auto reshapeOp = en.value()->get().template getDefiningOp(); if (!reshapeOp) @@ -1010,7 +1010,7 @@ struct PushExpandingReshape : public OpRewritePattern { // 2. Verify that we can merge the dimensions in the linalg and that we // don't need to create new reshapes operands. Inserting new reshape // operands would defeat the purpose of the transformation. - for (auto en : llvm::enumerate(inputOperands)) { + for (const auto &en : llvm::enumerate(inputOperands)) { if (en.value()->get() == newOperands[en.index()]) { AffineMap map = genericOp.getTiedIndexingMap(en.value()); for (unsigned i : llvm::seq(unsigned(0), map.getNumResults())) { @@ -1060,7 +1060,7 @@ struct PushExpandingReshape : public OpRewritePattern { newOp.region().begin()); // 6. Reshape the so that the type matches the uses. 
SmallVector newResults; - for (auto result : llvm::enumerate(newOp->getResults())) { + for (const auto &result : llvm::enumerate(newOp->getResults())) { newResults.push_back(rewriter.create( genericOp->getLoc(), genericOp.getOutputTensorTypes()[result.index()], result.value(), reassociation)); @@ -1407,7 +1407,7 @@ public: // All inputs should be constants. int numInputs = genericOp.getNumInputs(); SmallVector inputValues(numInputs); - for (auto operand : llvm::enumerate(genericOp.getInputOperands())) { + for (const auto &operand : llvm::enumerate(genericOp.getInputOperands())) { if (!matchPattern(operand.value()->get(), m_Constant(&inputValues[operand.index()]))) return failure(); @@ -1712,7 +1712,7 @@ struct RemoveOutsDependency : public OpRewritePattern { continue; modifiedOutput = true; SmallVector dynamicDims; - for (auto dim : llvm::enumerate(operandType.getShape())) { + for (const auto &dim : llvm::enumerate(operandType.getShape())) { if (dim.value() != ShapedType::kDynamicSize) continue; dynamicDims.push_back(rewriter.createOrFold( diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp index 0e0bc1ad48d1..f426af01d872 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp @@ -87,7 +87,7 @@ getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth, LLVM_DEBUG(llvm::dbgs() << "getShapeDefiningLoopRange map: " << map << "\n"); SmallVector shapeRanges(map.getNumResults(), nullptr); - for (auto en : llvm::enumerate(map.getResults())) { + for (const auto &en : llvm::enumerate(map.getResults())) { auto dimExpr = en.value().dyn_cast(); if (!dimExpr) continue; @@ -250,7 +250,7 @@ static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap, LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n"); DenseMap fusedLoopsAndRanges; Value shapedOperand = consumerOpOperand.get(); - for (auto en : llvm::enumerate(producerMap.getResults())) { + for (const auto &en : llvm::enumerate(producerMap.getResults())) { unsigned posInProducerLoop = en.value().cast().getPosition(); fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape( b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index()); @@ -521,7 +521,7 @@ mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult, static AffineMap pruneReductionDimsFromMap(ArrayRef iteratorTypes, AffineMap map) { llvm::SmallDenseSet projectedDims; - for (auto attr : llvm::enumerate(iteratorTypes)) { + for (const auto &attr : llvm::enumerate(iteratorTypes)) { if (!isParallelIterator(attr.value())) projectedDims.insert(attr.index()); } @@ -810,7 +810,7 @@ fuseOperations(OpBuilder &b, LinalgOp rootOp, TiledLinalgOp tiledLinalgOp, SmallVector fusedOps(fusionCandidates.size()); DenseMap origOpToFusedOp; origOpToFusedOp[rootOp.getOperation()] = tiledOp; - for (auto candidate : enumerate(llvm::reverse(fusionCandidates))) { + for (const auto &candidate : enumerate(llvm::reverse(fusionCandidates))) { LinalgOp origOp = candidate.value(); LinalgOp fusedOp = fuse(b, origOp, fusedLoopsAndRanges); origOpToFusedOp[origOp.getOperation()] = fusedOp; diff --git a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp index c171de854880..6bdcc192e27a 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp @@ -42,7 +42,7 @@ static SmallVector getTiledSliceDims(OpOperand *consumerOperand, 
// Search the slice dimensions tiled by a tile loop dimension. DenseSet tiledSliceDimIndices; - for (auto en : enumerate(indexingMap.getResults())) { + for (const auto &en : enumerate(indexingMap.getResults())) { for (auto tiledLoopDim : tiledLoopDims) { if (en.value().isFunctionOfDim(tiledLoopDim)) tiledSliceDimIndices.insert(en.index()); @@ -304,7 +304,7 @@ LogicalResult TileLoopNest::tileRootOp(OpBuilder &b, // Update the root operation and append the loops and tile loop dimensions. rootOp = tiledRootOp->op; tileLoopOps.append(tiledRootOp->loops.begin(), tiledRootOp->loops.end()); - for (auto en : enumerate(tileSizes)) { + for (const auto &en : enumerate(tileSizes)) { // Copy only the tiled loop dimensions with non-zero tile size. if (en.value() == 0) continue; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp index 97b586cdf762..d8875663487d 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp @@ -346,7 +346,7 @@ void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) { changed = false; func.walk([&](scf::ForOp forOp) { Operation *yield = forOp.getBody()->getTerminator(); - for (auto it : llvm::enumerate(forOp.getRegionIterArgs())) { + for (const auto &it : llvm::enumerate(forOp.getRegionIterArgs())) { OpOperand &ret = yield->getOpOperand(it.index()); HoistableWrite write = getLoopInvariantTransferWriteOpDefining(forOp, ret); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp index 6a9fb2189855..d3936eb366cc 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp @@ -277,7 +277,7 @@ struct TiledLoopToSCFPattern : public OpRewritePattern { // Collect loop control parameters for parallel and sequential dimensions. SmallVector seqLBs, seqUBs, seqSteps, seqIVs; SmallVector parLBs, parUBs, parSteps, parIVs; - for (auto en : llvm::enumerate( + for (const auto &en : llvm::enumerate( llvm::zip(tiledLoop.lowerBound(), tiledLoop.upperBound(), tiledLoop.step(), tiledLoop.getInductionVars()))) { Value lb, ub, step, iv; diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp index 7fa2aed8dfd8..fb281b319f67 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -87,7 +87,7 @@ defaultAllocBufferCallBack(const LinalgPromotionOptions &options, auto one = b.createOrFold(1); Value allocSize = one; - for (auto size : llvm::enumerate(boundingSubViewSize)) + for (const auto &size : llvm::enumerate(boundingSubViewSize)) allocSize = b.createOrFold(allocSize, size.value()); Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize, layout, alignment); @@ -219,7 +219,7 @@ FailureOr mlir::linalg::promoteSubviewAsNewBuffer( SmallVector partialSizes; fullSizes.reserve(rank); partialSizes.reserve(rank); - for (auto en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) { + for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) { auto rangeValue = en.value(); // Try to extract a tight constant. 
LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n"); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp index b4d2860101fd..cb2987973ea5 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp @@ -181,7 +181,7 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes, b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes); SmallVector iteratorTypes; - for (auto attr : + for (const auto &attr : enumerate(op.iterator_types().cast().getValue())) { if (loopIndexToRangeIndex.count(attr.index())) iteratorTypes.push_back(attr.value()); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp index 6d2af0c1cece..8156c5d45744 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp @@ -194,7 +194,7 @@ static LogicalResult padOperandToSmallestStaticBoundingBox( SmallVector staticSizes; staticSizes.reserve(shape.size()); auto shapedOp = cast(sliceOp.getOperation()); - for (auto en : enumerate(shapedOp.getMixedSizes())) { + for (const auto &en : enumerate(shapedOp.getMixedSizes())) { // Skip dropped dimensions. if (droppedDims.contains(en.index())) continue; @@ -269,7 +269,7 @@ linalg::rewriteAsPaddedOp(OpBuilder &b, LinalgOp opToPad, // linalg op around because it uses the dims of the original results. SmallVector paddedSubviewResults; paddedSubviewResults.reserve(opToPad->getNumResults()); - for (auto en : llvm::enumerate(paddedOp->getResults())) { + for (const auto &en : llvm::enumerate(paddedOp->getResults())) { Value paddedResult = en.value(); int64_t resultNumber = en.index(); int64_t rank = paddedResult.getType().cast().getRank(); @@ -443,7 +443,7 @@ LogicalResult mlir::linalg::LinalgBaseTileAndFusePattern::matchAndRewrite( // Tile the unfused loops; SmallVector unfusedLoopTileSizes; Value zero = rewriter.create(op->getLoc(), 0); - for (auto tileSize : enumerate(tileSizes)) { + for (const auto &tileSize : enumerate(tileSizes)) { if (tiledAndFusedOps->fusedLoopDims.count(tileSize.index())) unfusedLoopTileSizes.push_back(zero); else @@ -524,7 +524,7 @@ LogicalResult mlir::linalg::LinalgPaddingPattern::matchAndRewrite( } // Hoist the padding. - for (auto en : enumerate(depths)) { + for (const auto &en : enumerate(depths)) { OpOperand &opOperand = paddedOp->getOpOperand(en.index()); auto padTensorOp = opOperand.get().getDefiningOp(); if (!padTensorOp || en.value() == 0) diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index ab22ee0e0de5..5fda632b2f86 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -249,7 +249,7 @@ vectorizeLinalgYield(OpBuilder &b, Operation *op, auto yieldOp = dyn_cast(op); if (!yieldOp) return VectorizationResult{VectorizationStatus::Failure, nullptr}; - for (auto outputs : llvm::enumerate(yieldOp.values())) { + for (const auto &outputs : llvm::enumerate(yieldOp.values())) { // TODO: Scan for an opportunity for reuse. // TODO: use a map. 
Value vectorValue = bvm.lookup(outputs.value()); diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp index a54582493a36..a197c141403b 100644 --- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp +++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp @@ -169,7 +169,7 @@ Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim) { SmallVector getDynOperands(Location loc, Value val, OpBuilder &b) { SmallVector dynOperands; auto shapedType = val.getType().cast(); - for (auto dim : llvm::enumerate(shapedType.getShape())) { + for (const auto &dim : llvm::enumerate(shapedType.getShape())) { if (dim.value() == ShapedType::kDynamicSize) dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index())); } @@ -310,7 +310,7 @@ tensor::ExtractSliceOp makeComposedExtractSliceOp( SmallVector foldedOffsets(offsets.begin(), offsets.end()); AffineExpr dim1, dim2; bindDims(b.getContext(), dim1, dim2); - for (auto en : enumerate(producerOp.getMixedOffsets())) { + for (const auto &en : enumerate(producerOp.getMixedOffsets())) { SmallVector offsetValues = { getValueOrCreateConstantIndexOp(b, loc, foldedOffsets[en.index()]), getValueOrCreateConstantIndexOp(b, loc, en.value())}; @@ -403,7 +403,7 @@ void GenerateLoopNest::doit( if (distributionOptions.hasValue()) { // Collect loop ranges for parallel dimensions. SmallVector parallelLoopRanges; - for (auto iteratorType : enumerate(iteratorTypes)) + for (const auto &iteratorType : enumerate(iteratorTypes)) if (isParallelIterator(iteratorType.value())) parallelLoopRanges.push_back(loopRanges[iteratorType.index()]); @@ -435,7 +435,7 @@ void GenerateLoopNest::doit( // Filter out scf.for loops that were created out of parallel dimensions. SmallVector loops; - for (auto iteratorType : enumerate(iteratorTypes)) + for (const auto &iteratorType : enumerate(iteratorTypes)) if (isParallelIterator(iteratorType.value())) loops.push_back(loopNest.loops[iteratorType.index()]); @@ -677,7 +677,7 @@ void GenerateLoopNest::doit( distributionMethod.assign(distributionOptions->distributionMethod.begin(), distributionOptions->distributionMethod.end()); SmallVector parallelLoopRanges; - for (auto iteratorType : enumerate(iteratorTypes)) { + for (const auto &iteratorType : enumerate(iteratorTypes)) { if (isParallelIterator(iteratorType.value())) parallelLoopRanges.push_back(loopRanges[iteratorType.index()]); } @@ -686,7 +686,7 @@ void GenerateLoopNest::doit( SmallVector procInfo = options.procInfo(b, loc, parallelLoopRanges); unsigned index = 0; - for (auto iteratorType : enumerate(iteratorTypes)) { + for (const auto &iteratorType : enumerate(iteratorTypes)) { if (index >= procInfo.size()) break; if (isParallelIterator(iteratorType.value())) { diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp index d2f989b561e4..ced119aea1a4 100644 --- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp +++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp @@ -395,7 +395,7 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) { }; if (!checkCompatible(aOffset, bOffset)) return false; - for (auto aStride : enumerate(aStrides)) + for (const auto &aStride : enumerate(aStrides)) if (!checkCompatible(aStride.value(), bStrides[aStride.index()])) return false; } @@ -515,7 +515,7 @@ computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType, if (originalType.getRank() == reducedType.getRank()) return unusedDims; - for (auto dim : llvm::enumerate(sizes)) + for (const auto &dim : 
llvm::enumerate(sizes)) if (auto attr = dim.value().dyn_cast()) if (attr.cast().getInt() == 1) unusedDims.insert(dim.index()); @@ -1851,7 +1851,7 @@ static MemRefType getCanonicalSubViewResultType( if (!unusedDims) return nullptr; SmallVector shape; - for (auto sizes : llvm::enumerate(nonRankReducedType.getShape())) { + for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) { if (unusedDims->count(sizes.index())) continue; shape.push_back(sizes.value()); @@ -1903,7 +1903,7 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) { // Check all size values are static and matches the (static) source shape. ArrayRef sourceShape = subViewOp.getSourceType().getShape(); - for (auto size : llvm::enumerate(mixedSizes)) { + for (const auto &size : llvm::enumerate(mixedSizes)) { Optional intValue = getConstantIntValue(size.value()); if (!intValue || intValue.getValue() != sourceShape[size.index()]) return false; @@ -2040,7 +2040,7 @@ static MemRefType inferTransposeResultType(MemRefType memRefType, auto originalSizes = memRefType.getShape(); // Compute permuted sizes. SmallVector sizes(rank, 0); - for (auto en : llvm::enumerate(permutationMap.getResults())) + for (const auto &en : llvm::enumerate(permutationMap.getResults())) sizes[en.index()] = originalSizes[en.value().cast().getPosition()]; diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index 0b4346ddd08d..1f004c6c9950 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -249,7 +249,7 @@ verifyScheduleModifiers(OpAsmParser &parser, SmallVectorImpl> &modifiers) { if (modifiers.size() > 2) return parser.emitError(parser.getNameLoc()) << " unexpected modifier(s)"; - for (auto mod : modifiers) { + for (const auto &mod : modifiers) { // Translate the string. If it has no value, then it was not a valid // modifier! auto symbol = symbolizeScheduleModifier(mod); diff --git a/mlir/lib/Dialect/PDL/IR/PDL.cpp b/mlir/lib/Dialect/PDL/IR/PDL.cpp index b9e5415dadcc..2a399ec2169e 100644 --- a/mlir/lib/Dialect/PDL/IR/PDL.cpp +++ b/mlir/lib/Dialect/PDL/IR/PDL.cpp @@ -198,7 +198,7 @@ static LogicalResult verifyResultTypesAreInferrable(OperationOp op, return success(); // Otherwise, make sure each of the types can be inferred. - for (auto it : llvm::enumerate(resultTypes)) { + for (const auto &it : llvm::enumerate(resultTypes)) { Operation *resultTypeOp = it.value().getDefiningOp(); assert(resultTypeOp && "expected valid result type operation"); diff --git a/mlir/lib/Dialect/SCF/SCF.cpp b/mlir/lib/Dialect/SCF/SCF.cpp index 1ca7e49c5a3a..dd47a55fe6b9 100644 --- a/mlir/lib/Dialect/SCF/SCF.cpp +++ b/mlir/lib/Dialect/SCF/SCF.cpp @@ -1247,7 +1247,7 @@ struct RemoveUnusedResults : public OpRewritePattern { // Replace the operation by the new one. 
SmallVector repResults(op.getNumResults()); - for (auto en : llvm::enumerate(usedResults)) + for (const auto &en : llvm::enumerate(usedResults)) repResults[en.value().getResultNumber()] = newOp.getResult(en.index()); rewriter.replaceOp(op, repResults); return success(); @@ -1296,7 +1296,8 @@ struct ConvertTrivialIfToSelect : public OpRewritePattern { SmallVector results(op->getNumResults()); assert(thenYieldArgs.size() == results.size()); assert(elseYieldArgs.size() == results.size()); - for (auto it : llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) { + for (const auto &it : + llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) { Value trueVal = std::get<0>(it.value()); Value falseVal = std::get<1>(it.value()); if (trueVal == falseVal) @@ -1564,7 +1565,7 @@ struct CombineIfs : public OpRewritePattern { SmallVector prevValues; SmallVector nextValues; - for (auto pair : llvm::enumerate(combinedIf.getResults())) { + for (const auto &pair : llvm::enumerate(combinedIf.getResults())) { if (pair.index() < prevIf.getNumResults()) prevValues.push_back(pair.value()); else @@ -2368,7 +2369,7 @@ struct WhileUnusedResult : public OpRewritePattern { SmallVector newResultTypes; SmallVector newTermArgs; bool needUpdate = false; - for (auto it : + for (const auto &it : llvm::enumerate(llvm::zip(op.getResults(), afterArgs, termArgs))) { auto i = static_cast(it.index()); Value result = std::get<0>(it.value()); @@ -2403,7 +2404,7 @@ struct WhileUnusedResult : public OpRewritePattern { // null). SmallVector newResults(op.getNumResults()); SmallVector newAfterBlockArgs(op.getNumResults()); - for (auto it : llvm::enumerate(newResultsIndices)) { + for (const auto &it : llvm::enumerate(newResultsIndices)) { newResults[it.value()] = newWhile.getResult(it.index()); newAfterBlockArgs[it.value()] = newAfterBlock.getArgument(it.index()); } diff --git a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp index a3f307044052..d74b5d0457cc 100644 --- a/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp @@ -69,7 +69,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern { // Rewrite uses of the for-loop block arguments to the new while-loop // "after" arguments - for (auto barg : enumerate(forOp.getBody(0)->getArguments())) + for (const auto &barg : enumerate(forOp.getBody(0)->getArguments())) barg.value().replaceAllUsesWith(afterBlock->getArgument(barg.index())); // Inline for-loop body operations into 'after' region. @@ -87,7 +87,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern { // an extra value (the induction variable escapes the loop through being // carried in the set of iterargs). Instead, rewrite uses of the forOp // results. - for (auto arg : llvm::enumerate(forOp.getResults())) + for (const auto &arg : llvm::enumerate(forOp.getResults())) arg.value().replaceAllUsesWith(whileOp.getResult(arg.index() + 1)); rewriter.eraseOp(forOp); diff --git a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp index 321d953c17ed..3ef508275a76 100644 --- a/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp +++ b/mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp @@ -198,7 +198,7 @@ scf::ForOp LoopPipelinerInternal::createKernelLoop( llvm::SmallVector newLoopArg; // For existing loop argument initialize them with the right version from the // prologue. 
-  for (auto retVal :
+  for (const auto &retVal :
        llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) {
     Operation *def = retVal.value().getDefiningOp();
     assert(def && "Only support loop carried dependencies of distance 1");
@@ -245,7 +245,7 @@ void LoopPipelinerInternal::createKernel(
   rewriter.setInsertionPoint(newForOp.getBody(), newForOp.getBody()->begin());
   BlockAndValueMapping mapping;
   mapping.map(forOp.getInductionVar(), newForOp.getInductionVar());
-  for (auto arg : llvm::enumerate(forOp.getRegionIterArgs())) {
+  for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) {
     mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]);
   }
   for (Operation *op : opOrder) {
@@ -325,7 +325,7 @@ void LoopPipelinerInternal::createKernel(
       yieldOperands.push_back(mapping.lookupOrDefault(it.first));
   }
   // Map the yield operand to the forOp returned value.
-  for (auto retVal :
+  for (const auto &retVal :
        llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) {
     Operation *def = retVal.value().getDefiningOp();
     assert(def && "Only support loop carried dependencies of distance 1");
diff --git a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
index 998c73624ca7..8bf32ac68c7f 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ParallelLoopTiling.cpp
@@ -160,8 +160,8 @@ mlir::scf::tileParallelLoop(ParallelOp op, ArrayRef tileSizes,
     ifInbound.getThenRegion().takeBody(op.getRegion());
     Block &thenBlock = ifInbound.getThenRegion().front();
     b.setInsertionPointToStart(innerLoop.getBody());
-    for (auto ivs : llvm::enumerate(llvm::zip(innerLoop.getInductionVars(),
-                                              outerLoop.getInductionVars()))) {
+    for (const auto &ivs : llvm::enumerate(llvm::zip(
+             innerLoop.getInductionVars(), outerLoop.getInductionVars()))) {
       auto newIndex = b.create(
           op.getLoc(), std::get<0>(ivs.value()), std::get<1>(ivs.value()));
       thenBlock.getArgument(ivs.index())
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
index 10a3ba646771..6094ad8bf224 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -182,7 +182,7 @@ LogicalResult ProcessInterfaceVarABI::matchAndRewrite(
 
   auto indexType = typeConverter.getIndexType();
   auto attrName = spirv::getInterfaceVarABIAttrName();
-  for (auto argType : llvm::enumerate(funcOp.getType().getInputs())) {
+  for (const auto &argType : llvm::enumerate(funcOp.getType().getInputs())) {
     auto abiInfo = funcOp.getArgAttrOfType(
         argType.index(), attrName);
     if (!abiInfo) {
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index afa26650b4c4..776f022fe260 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -235,7 +235,7 @@ getTypeNumBytes(const SPIRVTypeConverter::Options &options, Type type) {
       return llvm::None;
 
     int64_t memrefSize = -1;
-    for (auto shape : enumerate(dims))
+    for (const auto &shape : enumerate(dims))
       memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
 
     return (offset + memrefSize) * elementSize.getValue();
@@ -557,7 +557,7 @@ FuncOpConversion::matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
     return failure();
 
   TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
-  for (auto argType : enumerate(fnType.getInputs())) {
+  for (const auto &argType : enumerate(fnType.getInputs())) {
    auto convertedType = getTypeConverter()->convertType(argType.value());
    if (!convertedType)
      return failure();
@@ -778,7 +778,7 @@ Value mlir::spirv::linearizeIndex(ValueRange indices, ArrayRef strides,
   Value linearizedIndex = builder.create(
       loc, integerType, IntegerAttr::get(integerType, offset));
 
-  for (auto index : llvm::enumerate(indices)) {
+  for (const auto &index : llvm::enumerate(indices)) {
     Value strideVal = builder.create(
         loc, integerType,
         IntegerAttr::get(integerType, strides[index.index()]));
diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
index 4a415b456826..481e191e9df4 100644
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -1669,7 +1669,7 @@ static LogicalResult verify(ReduceOp op) {
                            "ReduceOp operates on an extent tensor");
   }
 
-  for (auto type : llvm::enumerate(op.getInitVals()))
+  for (const auto &type : llvm::enumerate(op.getInitVals()))
    if (block.getArgument(type.index() + 2).getType() != type.value().getType())
      return op.emitOpError() << "type mismatch between argument "
                              << type.index() + 2
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index f7665135b5b1..665021b4c70d 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1017,7 +1017,7 @@ llvm::SmallDenseSet ExtractSliceOp::getDroppedDims() {
   ArrayRef resultShape = getType().getShape();
   SmallVector mixedSizes = getMixedSizes();
   unsigned shapePos = 0;
-  for (auto size : enumerate(mixedSizes)) {
+  for (const auto &size : enumerate(mixedSizes)) {
     Optional sizeVal = getConstantIntValue(size.value());
     // If the size is not 1, or if the current matched dimension of the result
     // is the same static shape as the size value (which is 1), then the
@@ -1039,7 +1039,7 @@ LogicalResult ExtractSliceOp::reifyResultShapes(
   SmallVector mixedSizes = getMixedSizes();
   llvm::SmallDenseSet droppedDims = getDroppedDims();
   Location loc = getLoc();
-  for (auto size : enumerate(mixedSizes)) {
+  for (const auto &size : enumerate(mixedSizes)) {
     if (droppedDims.count(size.index()))
       continue;
     if (auto attr = size.value().dyn_cast()) {
diff --git a/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp b/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp
index 9a1ca53074d0..92daab5e8b8f 100644
--- a/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorMultiDimReductionTransforms.cpp
@@ -126,7 +126,7 @@ public:
     // 1. Separate reduction and parallel dims.
     SmallVector parallelDims, parallelShapes;
     SmallVector reductionDims, reductionShapes;
-    for (auto it : llvm::enumerate(reductionMask)) {
+    for (const auto &it : llvm::enumerate(reductionMask)) {
       int64_t i = it.index();
       bool isReduction = it.value();
       if (isReduction) {
diff --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp
index fa608113b079..3f83578caade 100644
--- a/mlir/lib/Dialect/Vector/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/VectorOps.cpp
@@ -270,7 +270,7 @@ void vector::MultiDimReductionOp::build(OpBuilder &builder,
   result.addTypes(targetType);
 
   SmallVector reductionDims;
-  for (auto en : llvm::enumerate(reductionMask))
+  for (const auto &en : llvm::enumerate(reductionMask))
     if (en.value())
       reductionDims.push_back(en.index());
   result.addAttribute(getReductionDimsAttrName(),
@@ -615,7 +615,7 @@ static LogicalResult verify(ContractionOp op) {
   // that the number of map outputs equals the rank of its associated
   // vector operand.
   unsigned numIterators = op.iterator_types().getValue().size();
-  for (auto it : llvm::enumerate(op.indexing_maps())) {
+  for (const auto &it : llvm::enumerate(op.indexing_maps())) {
     auto index = it.index();
     auto map = it.value().cast().getValue();
     if (map.getNumSymbols() != 0)
@@ -695,7 +695,7 @@ static std::vector>
 getDimMap(ArrayRef indexingMaps, ArrayAttr iteratorTypes,
           StringRef targetIteratorTypeName, MLIRContext *context) {
   std::vector> dimMap;
-  for (auto it : llvm::enumerate(iteratorTypes)) {
+  for (const auto &it : llvm::enumerate(iteratorTypes)) {
     auto iteratorTypeName = it.value().cast().getValue();
     if (iteratorTypeName != targetIteratorTypeName)
       continue;
@@ -715,7 +715,7 @@ void ContractionOp::getIterationBounds(
   auto resVectorType = getResultType().dyn_cast();
   SmallVector indexingMaps(getIndexingMaps());
   SmallVector iterationShape;
-  for (auto it : llvm::enumerate(iterator_types())) {
+  for (const auto &it : llvm::enumerate(iterator_types())) {
     // Search lhs/rhs map results for 'targetExpr'.
     auto targetExpr = getAffineDimExpr(it.index(), getContext());
     auto iteratorTypeName = it.value().cast().getValue();
@@ -738,7 +738,7 @@ void ContractionOp::getIterationIndexMap(
     std::vector> &iterationIndexMap) {
   unsigned numMaps = indexing_maps().getValue().size();
   iterationIndexMap.resize(numMaps);
-  for (auto it : llvm::enumerate(indexing_maps())) {
+  for (const auto &it : llvm::enumerate(indexing_maps())) {
     auto index = it.index();
     auto map = it.value().cast().getValue();
     for (unsigned i = 0, e = map.getNumResults(); i < e; ++i) {
@@ -933,7 +933,7 @@ static LogicalResult verify(vector::ExtractOp op) {
   if (positionAttr.size() > static_cast(op.getVectorType().getRank()))
     return op.emitOpError(
         "expected position attribute of rank smaller than vector rank");
-  for (auto en : llvm::enumerate(positionAttr)) {
+  for (const auto &en : llvm::enumerate(positionAttr)) {
     auto attr = en.value().dyn_cast();
     if (!attr || attr.getInt() < 0 ||
         attr.getInt() >= op.getVectorType().getDimSize(en.index()))
@@ -1511,7 +1511,7 @@ static LogicalResult verify(ShuffleOp op) {
     return op.emitOpError("mask length mismatch");
   // Verify all indices.
   int64_t indexSize = v1Type.getDimSize(0) + v2Type.getDimSize(0);
-  for (auto en : llvm::enumerate(maskAttr)) {
+  for (const auto &en : llvm::enumerate(maskAttr)) {
     auto attr = en.value().dyn_cast();
     if (!attr || attr.getInt() < 0 || attr.getInt() >= indexSize)
       return op.emitOpError("mask index #")
@@ -1621,7 +1621,7 @@ static LogicalResult verify(InsertOp op) {
       (positionAttr.size() != static_cast(destVectorType.getRank())))
     return op.emitOpError(
         "expected position attribute rank to match the dest vector rank");
-  for (auto en : llvm::enumerate(positionAttr)) {
+  for (const auto &en : llvm::enumerate(positionAttr)) {
     auto attr = en.value().dyn_cast();
     if (!attr || attr.getInt() < 0 ||
         attr.getInt() >= destVectorType.getDimSize(en.index()))
@@ -2822,7 +2822,7 @@ public:
       newIndices.push_back(getValueOrCreateConstantIndexOp(
           rewriter, extractOp.getLoc(), offset));
     }
-    for (auto it : llvm::enumerate(xferOp.indices())) {
+    for (const auto &it : llvm::enumerate(xferOp.indices())) {
       OpFoldResult offset =
           extractOp.getMixedOffsets()[it.index() + rankReduced];
       newIndices.push_back(rewriter.create(
@@ -3913,7 +3913,7 @@ static LogicalResult verify(vector::TransposeOp op) {
   if (rank != size)
     return op.emitOpError("transposition length mismatch: ") << size;
   SmallVector seen(rank, false);
-  for (auto ta : llvm::enumerate(transpAttr)) {
+  for (const auto &ta : llvm::enumerate(transpAttr)) {
     int64_t i = ta.value().cast().getInt();
     if (i < 0 || i >= rank)
       return op.emitOpError("transposition index out of range: ") << i;
@@ -4004,7 +4004,7 @@ static LogicalResult verify(ConstantMaskOp &op) {
   // result dimension size.
   auto resultShape = resultType.getShape();
   SmallVector maskDimSizes;
-  for (auto it : llvm::enumerate(op.mask_dim_sizes())) {
+  for (const auto &it : llvm::enumerate(op.mask_dim_sizes())) {
     int64_t attrValue = it.value().cast().getInt();
     if (attrValue < 0 || attrValue > resultShape[it.index()])
       return op.emitOpError(
diff --git a/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp b/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp
index 36725e03ae09..c47ef94e2e23 100644
--- a/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransferPermutationMapRewritePatterns.cpp
@@ -79,7 +79,7 @@ struct TransferReadPermutationLowering
     // Apply the reverse transpose to deduce the type of the transfer_read.
     ArrayRef originalShape = op.getVectorType().getShape();
     SmallVector newVectorShape(originalShape.size());
-    for (auto pos : llvm::enumerate(permutation)) {
+    for (const auto &pos : llvm::enumerate(permutation)) {
       newVectorShape[pos.value()] = originalShape[pos.index()];
     }
 
diff --git a/mlir/lib/Dialect/Vector/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
index 3cac3302af32..0b49ccd58b27 100644
--- a/mlir/lib/Dialect/Vector/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/VectorTransforms.cpp
@@ -53,7 +53,7 @@ static Optional getResultIndex(AffineMap map, int64_t index) {
 
 static SmallVector adjustIter(ArrayAttr iteratorTypes, int64_t index) {
   SmallVector results;
-  for (auto it : llvm::enumerate(iteratorTypes)) {
+  for (const auto &it : llvm::enumerate(iteratorTypes)) {
     int64_t idx = it.index();
     if (idx == index)
       continue;
@@ -871,7 +871,7 @@ struct MultiReduceToContract
     auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
     SmallVector exprs;
     SmallVector iteratorTypes;
-    for (auto isReduceDim : llvm::enumerate(reductionMask)) {
+    for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
       if (!isReduceDim.value()) {
         iteratorTypes.push_back(getParallelIteratorTypeName());
         exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
@@ -997,7 +997,7 @@ struct CombineContractBroadcast
         broadcast.getVectorType().getRank() - srcType.getRank();
     bool innerDimBroadcast = false;
     SmallVector originalDims;
-    for (auto dim : llvm::enumerate(srcType.getShape())) {
+    for (const auto &dim : llvm::enumerate(srcType.getShape())) {
       if (dim.value() !=
           broadcast.getVectorType().getDimSize(rankDiff + dim.index())) {
         innerDimBroadcast = true;
diff --git a/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp b/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp
index 4c31164b433e..de5b2fdcfceb 100644
--- a/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp
+++ b/mlir/lib/Dialect/Vector/VectorUnrollDistribute.cpp
@@ -52,7 +52,7 @@ sliceTransferIndices(int64_t index, ArrayRef originalShape,
       getVectorOffset(originalShape, targetShape, index);
   // Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'.
   SmallVector slicedIndices(indices.begin(), indices.end());
-  for (auto dim : llvm::enumerate(permutationMap.getResults())) {
+  for (const auto &dim : llvm::enumerate(permutationMap.getResults())) {
     if (isBroadcast(dim.value()))
       continue;
     unsigned pos = dim.value().cast().getPosition();
@@ -429,7 +429,7 @@ struct ContractExtractPattern : public OpRewritePattern {
     for (unsigned i : llvm::seq(unsigned(0), affineMap.getNumResults()))
       map[affineMap.getDimPosition(i)] = extract.getResultType().getDimSize(i);
     SmallVector extractOperands;
-    for (auto it : llvm::enumerate(contract.getIndexingMaps())) {
+    for (const auto &it : llvm::enumerate(contract.getIndexingMaps())) {
      // For each operands calculate the new vector type after distribution.
      Value operand = contract->getOperand(it.index());
      auto vecType = operand.getType().cast();
diff --git a/mlir/lib/IR/AffineExpr.cpp b/mlir/lib/IR/AffineExpr.cpp
index 47dcff627a33..2a3b9819b320 100644
--- a/mlir/lib/IR/AffineExpr.cpp
+++ b/mlir/lib/IR/AffineExpr.cpp
@@ -1022,7 +1022,7 @@ static AffineExpr getSemiAffineExprFromFlatForm(ArrayRef flatExprs,
   // as lhs/rhs, and store the indices, constant coefficient corresponding to
   // the indices in `coefficients` map, and affine expression corresponding to
   // in indices in `indexToExprMap` map.
-  for (auto it : llvm::enumerate(localExprs)) {
+  for (const auto &it : llvm::enumerate(localExprs)) {
     AffineExpr expr = it.value();
     if (flatExprs[numDims + numSymbols + it.index()] == 0)
       continue;
diff --git a/mlir/lib/IR/AffineMap.cpp b/mlir/lib/IR/AffineMap.cpp
index a60120637011..ecdf8376b5fc 100644
--- a/mlir/lib/IR/AffineMap.cpp
+++ b/mlir/lib/IR/AffineMap.cpp
@@ -121,7 +121,7 @@ bool AffineMap::isMinorIdentityWithBroadcasting(
   if (getNumDims() < getNumResults())
     return false;
   unsigned suffixStart = getNumDims() - getNumResults();
-  for (auto idxAndExpr : llvm::enumerate(getResults())) {
+  for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
     unsigned resIdx = idxAndExpr.index();
     AffineExpr expr = idxAndExpr.value();
     if (auto constExpr = expr.dyn_cast()) {
@@ -168,7 +168,7 @@ bool AffineMap::isPermutationOfMinorIdentityWithBroadcasting(
       getNumResults() > getNumInputs() ? getNumResults() - getNumInputs() : 0;
   llvm::SmallBitVector dimFound(std::max(getNumInputs(), getNumResults()),
                                 false);
-  for (auto idxAndExpr : llvm::enumerate(getResults())) {
+  for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
     unsigned resIdx = idxAndExpr.index();
     AffineExpr expr = idxAndExpr.value();
     // Each result may be either a constant 0 (broadcast dimension) or a
@@ -675,7 +675,7 @@ AffineMap mlir::inversePermutation(AffineMap map) {
     return map;
   assert(map.getNumSymbols() == 0 && "expected map without symbols");
   SmallVector exprs(map.getNumDims());
-  for (auto en : llvm::enumerate(map.getResults())) {
+  for (const auto &en : llvm::enumerate(map.getResults())) {
     auto expr = en.value();
     // Skip non-permutations.
     if (auto d = expr.dyn_cast()) {
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index e965afb0feaa..6efd384ad3cc 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -1036,7 +1036,7 @@ AffineMap mlir::makeStridedLinearLayoutMap(ArrayRef strides,
   }
 
   // AffineExpr for strides.
-  for (auto en : llvm::enumerate(strides)) {
+  for (const auto &en : llvm::enumerate(strides)) {
     auto dim = en.index();
     auto stride = en.value();
     assert(stride != 0 && "Invalid stride specification");
diff --git a/mlir/lib/IR/Verifier.cpp b/mlir/lib/IR/Verifier.cpp
index 840a3156f283..bbc560d429d7 100644
--- a/mlir/lib/IR/Verifier.cpp
+++ b/mlir/lib/IR/Verifier.cpp
@@ -316,7 +316,7 @@ OperationVerifier::verifyDominanceOfContainedRegions(Operation &op,
     for (Operation &op : block) {
       if (isReachable) {
         // Check that operands properly dominate this use.
-        for (auto operand : llvm::enumerate(op.getOperands())) {
+        for (const auto &operand : llvm::enumerate(op.getOperands())) {
           if (domInfo.properlyDominates(operand.value(), &op))
             continue;
 
diff --git a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
index 26c80795c650..d2ab30282562 100644
--- a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
+++ b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
@@ -131,7 +131,7 @@ verifyTypesAlongAllEdges(Operation *op, Optional sourceNo,
            << succInputsTypes.size();
   }
 
-  for (auto typesIdx :
+  for (const auto &typesIdx :
        llvm::enumerate(llvm::zip(*sourceTypes, succInputsTypes))) {
     Type sourceType = std::get<0>(typesIdx.value());
     Type inputType = std::get<1>(typesIdx.value());
@@ -266,7 +266,7 @@ bool mlir::insideMutuallyExclusiveRegions(Operation *a, Operation *b) {
       return false;
     // Compute index of region.
     int64_t beginIndex = -1;
-    for (auto it : llvm::enumerate(branchOp->getRegions()))
+    for (const auto &it : llvm::enumerate(branchOp->getRegions()))
       if (&it.value() == begin)
         beginIndex = it.index();
     assert(beginIndex != -1 && "could not find region in op");
diff --git a/mlir/lib/Interfaces/InferTypeOpInterface.cpp b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
index 67c9ccbaec5b..ff17ed0498bb 100644
--- a/mlir/lib/Interfaces/InferTypeOpInterface.cpp
+++ b/mlir/lib/Interfaces/InferTypeOpInterface.cpp
@@ -189,7 +189,7 @@ LogicalResult mlir::detail::inferReturnTensorTypes(
   if (failed(componentTypeFn(context, location, operands, attributes, regions,
                              retComponents)))
     return failure();
-  for (auto shapeAndType : retComponents) {
+  for (const auto &shapeAndType : retComponents) {
     assert(shapeAndType.getAttribute() == nullptr && "attribute not supported");
     if (shapeAndType.hasRank())
       inferredReturnTypes.push_back(RankedTensorType::get(
diff --git a/mlir/lib/Reducer/ReductionTreePass.cpp b/mlir/lib/Reducer/ReductionTreePass.cpp
index 859f64a01e28..a1308f936255 100644
--- a/mlir/lib/Reducer/ReductionTreePass.cpp
+++ b/mlir/lib/Reducer/ReductionTreePass.cpp
@@ -41,7 +41,7 @@ static void applyPatterns(Region &region,
   std::vector opsNotInRange;
   std::vector opsInRange;
   size_t keepIndex = 0;
-  for (auto op : enumerate(region.getOps())) {
+  for (const auto &op : enumerate(region.getOps())) {
     int index = op.index();
     if (keepIndex < rangeToKeep.size() &&
         index == rangeToKeep[keepIndex].second)
diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp
index bd98ce0c1e09..765c47b2ed0c 100644
--- a/mlir/lib/Rewrite/ByteCode.cpp
+++ b/mlir/lib/Rewrite/ByteCode.cpp
@@ -198,9 +198,9 @@ public:
         maxTypeRangeMemoryIndex(maxTypeRangeMemoryIndex),
         maxValueRangeMemoryIndex(maxValueRangeMemoryIndex),
         maxLoopLevel(maxLoopLevel) {
-    for (auto it : llvm::enumerate(constraintFns))
+    for (const auto &it : llvm::enumerate(constraintFns))
       constraintToMemIndex.try_emplace(it.value().first(), it.index());
-    for (auto it : llvm::enumerate(rewriteFns))
+    for (const auto &it : llvm::enumerate(rewriteFns))
       externalRewriterToMemIndex.try_emplace(it.value().first(), it.index());
   }
 
@@ -631,7 +631,7 @@ void Generator::allocateMemoryIndices(FuncOp matcherFunc,
     ByteCodeLiveRange &defRange = defIt.second;
 
     // Try to allocate to an existing index.
-    for (auto existingIndexIt : llvm::enumerate(allocatedIndices)) {
+    for (const auto &existingIndexIt : llvm::enumerate(allocatedIndices)) {
      ByteCodeLiveRange &existingRange = existingIndexIt.value();
      if (!defRange.overlaps(existingRange)) {
        existingRange.unionWith(defRange);
diff --git a/mlir/lib/Rewrite/PatternApplicator.cpp b/mlir/lib/Rewrite/PatternApplicator.cpp
index d5a98fef09e7..edaf13e575d3 100644
--- a/mlir/lib/Rewrite/PatternApplicator.cpp
+++ b/mlir/lib/Rewrite/PatternApplicator.cpp
@@ -53,7 +53,7 @@ void PatternApplicator::applyCostModel(CostModel model) {
   // Apply the cost model to the bytecode patterns first, and then the native
   // patterns.
   if (const PDLByteCode *bytecode = frozenPatternList.getPDLByteCode()) {
-    for (auto it : llvm::enumerate(bytecode->getPatterns()))
+    for (const auto &it : llvm::enumerate(bytecode->getPatterns()))
      mutableByteCodeState->updatePatternBenefit(it.index(),
                                                 model(it.value()));
   }
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index 366a3d7ce24a..3e06f9caf7b1 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -821,7 +821,7 @@ LogicalResult Importer::processFunction(llvm::Function *f) {
   currentEntryBlock = blockList[0];
 
   // Add function arguments to the entry block.
-  for (auto kv : llvm::enumerate(f->args()))
+  for (const auto &kv : llvm::enumerate(f->args()))
     instMap[&kv.value()] =
         blockList[0]->addArgument(functionType.getParamType(kv.index()));
 
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 7f238afd2c92..404018bebe93 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -381,7 +381,7 @@ static Value getPHISourceValue(Block *current, Block *pred,
     // the case branch that was taken.
     if (switchOp.getDefaultDestination() == current)
       return switchOp.getDefaultOperands()[index];
-    for (auto i : llvm::enumerate(switchOp.getCaseDestinations()))
+    for (const auto &i : llvm::enumerate(switchOp.getCaseDestinations()))
      if (i.value() == current)
        return switchOp.getCaseOperands(i.index())[index];
   }
diff --git a/mlir/lib/Transforms/BufferResultsToOutParams.cpp b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
index 2abdccc6866c..2d74c4085e70 100644
--- a/mlir/lib/Transforms/BufferResultsToOutParams.cpp
+++ b/mlir/lib/Transforms/BufferResultsToOutParams.cpp
@@ -25,7 +25,7 @@ static void updateFuncOp(FuncOp func,
   // Collect information about the results will become appended arguments.
   SmallVector erasedResultTypes;
   SmallVector erasedResultIndices;
-  for (auto resultType : llvm::enumerate(functionType.getResults())) {
+  for (const auto &resultType : llvm::enumerate(functionType.getResults())) {
     if (resultType.value().isa()) {
       erasedResultIndices.push_back(resultType.index());
       erasedResultTypes.push_back(resultType.value());
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index e32c54264c65..fd9bba81c4a2 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -84,7 +84,7 @@ static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) {
   OpBuilder bOuter(forOp);
   // Put together alloc operands for any dynamic dimensions of the memref.
   SmallVector allocOperands;
-  for (auto dim : llvm::enumerate(oldMemRefType.getShape())) {
+  for (const auto &dim : llvm::enumerate(oldMemRefType.getShape())) {
     if (dim.value() == ShapedType::kDynamicSize)
       allocOperands.push_back(bOuter.createOrFold(
           forOp.getLoc(), oldMemRef, dim.index()));
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index a299b8c5b660..24711b0de132 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -252,7 +252,7 @@ public:
     op->setLoc(loc);
     op->setAttrs(attrs);
     op->setOperands(operands);
-    for (auto it : llvm::enumerate(successors))
+    for (const auto &it : llvm::enumerate(successors))
       op->setSuccessor(it.value(), it.index());
   }
 
@@ -1256,7 +1256,7 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
   remapped.reserve(llvm::size(values));
 
   SmallVector legalTypes;
-  for (auto it : llvm::enumerate(values)) {
+  for (const auto &it : llvm::enumerate(values)) {
     Value operand = it.value();
     Type origType = operand.getType();
 
diff --git a/mlir/lib/Transforms/Utils/InliningUtils.cpp b/mlir/lib/Transforms/Utils/InliningUtils.cpp
index 3eed22d8a5b4..8b2040633a1a 100644
--- a/mlir/lib/Transforms/Utils/InliningUtils.cpp
+++ b/mlir/lib/Transforms/Utils/InliningUtils.cpp
@@ -215,7 +215,7 @@ inlineRegionImpl(InlinerInterface &interface, Region *src, Block *inlineBlock,
   } else {
     // Otherwise, there were multiple blocks inlined. Add arguments to the post
    // insertion block to represent the results to replace.
-    for (auto resultToRepl : llvm::enumerate(resultsToReplace)) {
+    for (const auto &resultToRepl : llvm::enumerate(resultsToReplace)) {
      resultToRepl.value().replaceAllUsesWith(postInsertBlock->addArgument(
          regionResultTypes[resultToRepl.index()]));
    }
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 1700d60a9173..6328b59d9008 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -3353,7 +3353,7 @@ createFullTiles(MutableArrayRef inputNest,
 
   // Add the body for the full tile loop nest.
   BlockAndValueMapping operandMap;
-  for (auto loopEn : llvm::enumerate(inputNest))
+  for (const auto &loopEn : llvm::enumerate(inputNest))
     operandMap.map(loopEn.value().getInductionVar(),
                    fullTileLoops[loopEn.index()].getInductionVar());
   b = OpBuilder::atBlockTerminator(fullTileLoops.back().getBody());
diff --git a/mlir/lib/Transforms/Utils/RegionUtils.cpp b/mlir/lib/Transforms/Utils/RegionUtils.cpp
index 023b1d6ed5ea..e60e7e65cd60 100644
--- a/mlir/lib/Transforms/Utils/RegionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/RegionUtils.cpp
@@ -589,7 +589,7 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
       1 + blocksToMerge.size(),
       SmallVector(operandsToMerge.size()));
   unsigned curOpIndex = 0;
-  for (auto it : llvm::enumerate(operandsToMerge)) {
+  for (const auto &it : llvm::enumerate(operandsToMerge)) {
     unsigned nextOpOffset = it.value().first - curOpIndex;
     curOpIndex = it.value().first;