Apply clang-tidy fixes for performance-for-range-copy to MLIR (NFC)

Mehdi Amini 2022-01-02 22:02:14 +00:00
parent abb336d26b
commit e4853be2f1
73 changed files with 150 additions and 146 deletions
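
Background on the check: clang-tidy's performance-for-range-copy flags range-based for loops whose loop variable is bound by value but never mutated, since every iteration then copies the element; binding it as const auto & removes the copy without changing behavior, which is what makes this commit NFC. A minimal sketch of the before/after pattern follows; the Widget type and process() function are hypothetical, for illustration only:

  #include <string>
  #include <vector>

  struct Widget {
    std::string name; // non-trivial to copy
  };

  void process(const std::vector<Widget> &widgets) {
    // Flagged: `auto w` copies each Widget (and its string) per iteration.
    for (auto w : widgets)
      (void)w.name.size();

    // Fixed: `const auto &w` binds to the stored element; no copy is made.
    for (const auto &w : widgets)
      (void)w.name.size();
  }

Most hunks below touch loops over llvm::enumerate(...), whose iterator dereferences to a temporary index/value pair rather than a reference into the range. Binding that temporary as const auto & is still well-defined C++: in the range-for desugaring the reference keeps the temporary alive for the body of each iteration, so the rewrite stays behavior-preserving there as well.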

@@ -3346,7 +3346,7 @@ AffineMap mlir::alignAffineMapWithValues(AffineMap map, ValueRange operands,
 newSyms->append(syms.begin(), syms.end());
 }
-for (auto operand : llvm::enumerate(operands)) {
+for (const auto &operand : llvm::enumerate(operands)) {
 // Compute replacement dim/sym of operand.
 AffineExpr replacement;
 auto dimIt = std::find(dims.begin(), dims.end(), operand.value());

@@ -353,7 +353,8 @@ bool mlir::isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
 // Work backwards over the body of the block so that the shift of a use's
 // ancestor operation in the block gets recorded before it's looked up.
 DenseMap<Operation *, uint64_t> forBodyShift;
-for (auto it : llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
+for (const auto &it :
+llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
 auto &op = it.value();
 // Get the index of the current operation, note that we are iterating in

@@ -52,7 +52,7 @@ static void computeRegionBlockNumberOfExecutions(
 // Query RegionBranchOpInterface interface if it is available.
 if (auto regionInterface = dyn_cast<RegionBranchOpInterface>(parentOp)) {
 SmallVector<Attribute, 4> operands(parentOp->getNumOperands());
-for (auto operandIt : llvm::enumerate(parentOp->getOperands()))
+for (const auto &operandIt : llvm::enumerate(parentOp->getOperands()))
 matchPattern(operandIt.value(), m_Constant(&operands[operandIt.index()]));
 regionInterface.getNumRegionInvocations(operands, numRegionsInvocations);

@@ -86,7 +86,7 @@ static void getBackwardSliceImpl(Operation *op,
 if (filter && !filter(op))
 return;
-for (auto en : llvm::enumerate(op->getOperands())) {
+for (const auto &en : llvm::enumerate(op->getOperands())) {
 auto operand = en.value();
 if (auto *definingOp = operand.getDefiningOp()) {
 if (backwardSlice->count(definingOp) == 0)

@@ -1155,7 +1155,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
 resultTypes.reserve(resultTypeList.size());
 if (resultSegmentSpecObj.is_none()) {
 // Non-variadic result unpacking.
-for (auto it : llvm::enumerate(resultTypeList)) {
+for (const auto &it : llvm::enumerate(resultTypeList)) {
 try {
 resultTypes.push_back(py::cast<PyType *>(it.value()));
 if (!resultTypes.back())
@@ -1179,7 +1179,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
 .str());
 }
 resultSegmentLengths.reserve(resultTypeList.size());
-for (auto it :
+for (const auto &it :
 llvm::enumerate(llvm::zip(resultTypeList, resultSegmentSpec))) {
 int segmentSpec = std::get<1>(it.value());
 if (segmentSpec == 1 || segmentSpec == 0) {
@@ -1240,7 +1240,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
 operands.reserve(operands.size());
 if (operandSegmentSpecObj.is_none()) {
 // Non-sized operand unpacking.
-for (auto it : llvm::enumerate(operandList)) {
+for (const auto &it : llvm::enumerate(operandList)) {
 try {
 operands.push_back(py::cast<PyValue *>(it.value()));
 if (!operands.back())
@@ -1264,7 +1264,7 @@ PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
 .str());
 }
 operandSegmentLengths.reserve(operandList.size());
-for (auto it :
+for (const auto &it :
 llvm::enumerate(llvm::zip(operandList, operandSegmentSpec))) {
 int segmentSpec = std::get<1>(it.value());
 if (segmentSpec == 1 || segmentSpec == 0) {

@@ -21,7 +21,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
 SmallVector<LLVM::GlobalOp, 3> workgroupBuffers;
 workgroupBuffers.reserve(gpuFuncOp.getNumWorkgroupAttributions());
-for (auto en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) {
+for (const auto &en : llvm::enumerate(gpuFuncOp.getWorkgroupAttributions())) {
 Value attribution = en.value();
 auto type = attribution.getType().dyn_cast<MemRefType>();
@@ -88,7 +88,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
 if (!workgroupBuffers.empty())
 zero = rewriter.create<LLVM::ConstantOp>(loc, i32Type,
 rewriter.getI32IntegerAttr(0));
-for (auto en : llvm::enumerate(workgroupBuffers)) {
+for (const auto &en : llvm::enumerate(workgroupBuffers)) {
 LLVM::GlobalOp global = en.value();
 Value address = rewriter.create<LLVM::AddressOfOp>(loc, global);
 auto elementType =
@@ -111,7 +111,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
 // Rewrite private memory attributions to alloca'ed buffers.
 unsigned numWorkgroupAttributions = gpuFuncOp.getNumWorkgroupAttributions();
 auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
-for (auto en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
+for (const auto &en : llvm::enumerate(gpuFuncOp.getPrivateAttributions())) {
 Value attribution = en.value();
 auto type = attribution.getType().cast<MemRefType>();
 assert(type && type.hasStaticShape() && "unexpected type in attribution");

@@ -634,7 +634,7 @@ Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray(
 arraySize, /*alignment=*/0);
 auto zero = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
 builder.getI32IntegerAttr(0));
-for (auto en : llvm::enumerate(arguments)) {
+for (const auto &en : llvm::enumerate(arguments)) {
 auto index = builder.create<LLVM::ConstantOp>(
 loc, llvmInt32Type, builder.getI32IntegerAttr(en.index()));
 auto fieldPtr = builder.create<LLVM::GEPOp>(

@@ -206,7 +206,7 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, TypeConverter &typeConverter,
 // LowerABIAttributesPass.
 TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
 {
-for (auto argType : enumerate(funcOp.getType().getInputs())) {
+for (const auto &argType : enumerate(funcOp.getType().getInputs())) {
 auto convertedType = typeConverter.convertType(argType.value());
 signatureConverter.addInputs(argType.index(), convertedType);
 }

@@ -222,7 +222,7 @@ void VulkanLaunchFuncToVulkanCallsPass::createBindMemRefCalls(
 Value descriptorSet = builder.create<LLVM::ConstantOp>(
 loc, getInt32Type(), builder.getI32IntegerAttr(0));
-for (auto en :
+for (const auto &en :
 llvm::enumerate(cInterfaceVulkanLaunchCallOp.getOperands().drop_front(
 kVulkanLaunchNumConfigOperands))) {
 // Create LLVM constant for the descriptor binding index.

@@ -213,11 +213,11 @@ MemRefDescriptor ConvertToLLVMPattern::createMemRefDescriptor(
 createIndexConstant(rewriter, loc, 0));
 // Fields 4: Sizes.
-for (auto en : llvm::enumerate(sizes))
+for (const auto &en : llvm::enumerate(sizes))
 memRefDescriptor.setSize(rewriter, loc, en.index(), en.value());
 // Field 5: Strides.
-for (auto en : llvm::enumerate(strides))
+for (const auto &en : llvm::enumerate(strides))
 memRefDescriptor.setStride(rewriter, loc, en.index(), en.value());
 return memRefDescriptor;

@@ -101,7 +101,7 @@ LogicalResult LLVM::detail::handleMultidimensionalVectors(
 // For this unrolled `position` corresponding to the `linearIndex`^th
 // element, extract operand vectors
 SmallVector<Value, 4> extractedOperands;
-for (auto operand : llvm::enumerate(operands)) {
+for (const auto &operand : llvm::enumerate(operands)) {
 extractedOperands.push_back(rewriter.create<LLVM::ExtractValueOp>(
 loc, operand1DVectorTypes[operand.index()], operand.value(),
 position));

@@ -1420,7 +1420,8 @@ public:
 targetMemRef.setOffset(rewriter, loc, viewMemRef.offset(rewriter, loc));
 // Iterate over the dimensions and apply size/stride permutation.
-for (auto en : llvm::enumerate(transposeOp.permutation().getResults())) {
+for (const auto &en :
+llvm::enumerate(transposeOp.permutation().getResults())) {
 int sourcePos = en.index();
 int targetPos = en.value().cast<AffineDimExpr>().getPosition();
 targetMemRef.setSize(rewriter, loc, targetPos,

@@ -736,7 +736,7 @@ void PatternLowering::generateRewriter(
 bool seenVariableLength = false;
 Type valueTy = builder.getType<pdl::ValueType>();
 Type valueRangeTy = pdl::RangeType::get(valueTy);
-for (auto it : llvm::enumerate(resultTys)) {
+for (const auto &it : llvm::enumerate(resultTys)) {
 Value &type = rewriteValues[it.value()];
 if (type)
 continue;
@@ -862,7 +862,7 @@ void PatternLowering::generateOperationResultTypeRewriter(
 // Otherwise, handle inference for each of the result types individually.
 OperandRange resultTypeValues = op.types();
 types.reserve(resultTypeValues.size());
-for (auto it : llvm::enumerate(resultTypeValues)) {
+for (const auto &it : llvm::enumerate(resultTypeValues)) {
 Value resultType = it.value();
 // Check for an already translated value.

@@ -162,7 +162,7 @@ static void getTreePredicates(std::vector<PositionalPredicate> &predList,
 builder.getAllOperands(opPos));
 } else {
 bool foundVariableLength = false;
-for (auto operandIt : llvm::enumerate(operands)) {
+for (const auto &operandIt : llvm::enumerate(operands)) {
 bool isVariadic = operandIt.value().getType().isa<pdl::RangeType>();
 foundVariableLength |= isVariadic;
@@ -460,7 +460,7 @@ static void buildCostGraph(ArrayRef<Value> roots, RootOrderingGraph &graph,
 }
 // Default case: visit all the operands.
-for (auto p : llvm::enumerate(operationOp.operands()))
+for (const auto &p : llvm::enumerate(operationOp.operands()))
 toVisit.emplace(p.value(), entry.value, p.index(),
 entry.depth + 1);
 })

@@ -261,7 +261,7 @@ void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
 builder.setInsertionPointToStart(&launchOp.body().front());
 auto *lbArgumentIt = lbs.begin();
 auto *stepArgumentIt = steps.begin();
-for (auto en : llvm::enumerate(ivs)) {
+for (const auto &en : llvm::enumerate(ivs)) {
 Value id =
 en.index() < numBlockDims
 ? getDim3Value(launchOp.getBlockIds(), en.index())

@@ -387,7 +387,7 @@ WhileOpConversion::matchAndRewrite(scf::WhileOp whileOp, OpAdaptor adaptor,
 // the before region, which may not matching the whole op's result. Instead,
 // the scf.condition op returns values matching the whole op's results. So we
 // need to create/load/store variables according to that.
-for (auto it : llvm::enumerate(condArgs)) {
+for (const auto &it : llvm::enumerate(condArgs)) {
 auto res = it.value();
 auto i = it.index();
 auto pointerType =

@@ -208,7 +208,7 @@ class GPULaunchLowering : public ConvertOpToLLVMPattern<gpu::LaunchFuncOp> {
 SmallVector<CopyInfo, 4> copyInfo;
 auto numKernelOperands = launchOp.getNumKernelOperands();
 auto kernelOperands = adaptor.getOperands().take_back(numKernelOperands);
-for (auto operand : llvm::enumerate(kernelOperands)) {
+for (const auto &operand : llvm::enumerate(kernelOperands)) {
 // Check if the kernel's operand is a ranked memref.
 auto memRefType = launchOp.getKernelOperand(operand.index())
 .getType()

@@ -254,7 +254,7 @@ protected:
 rewriter.getNamedAttr(function_like_impl::getArgDictAttrName(),
 rewriter.getArrayAttr(newArgAttrs)));
 }
-for (auto pair : llvm::enumerate(attributes)) {
+for (const auto &pair : llvm::enumerate(attributes)) {
 if (pair.value().getName() == "llvm.linkage") {
 attributes.erase(attributes.begin() + pair.index());
 break;

@@ -694,7 +694,7 @@ elementwiseMatchAndRewriteHelper(Operation *operation,
 SmallVector<int64_t, 5> newShape;
 SmallVector<AffineExpr, 4> affineExprs;
 newShape.reserve(type.getRank());
-for (auto it : llvm::enumerate(type.getShape())) {
+for (const auto &it : llvm::enumerate(type.getShape())) {
 if (it.value() == resultTy.getDimSize(it.index())) {
 newShape.push_back(it.value());
 affineExprs.push_back(
@@ -1175,7 +1175,7 @@ public:
 SmallVector<AffineExpr, 2> inputExprs;
 inputExprs.resize(resultTy.getRank());
 auto operandTy = input.getType().cast<ShapedType>();
-for (auto permutation : llvm::enumerate(perms.getValues<APInt>())) {
+for (const auto &permutation : llvm::enumerate(perms.getValues<APInt>())) {
 auto index = permutation.index();
 auto value = permutation.value().getZExtValue();
 if (!operandTy.hasRank() || operandTy.isDynamicDim(index)) {

@@ -449,7 +449,7 @@ static void convertForOp(scf::ForOp op,
 llvm::DenseMap<Value, Value> &valueMapping) {
 SmallVector<Value> newOperands;
 SmallVector<std::pair<size_t, size_t>> argMapping;
-for (auto operand : llvm::enumerate(op.getIterOperands())) {
+for (const auto &operand : llvm::enumerate(op.getIterOperands())) {
 auto it = valueMapping.find(operand.value());
 if (it == valueMapping.end())
 continue;
@@ -474,7 +474,7 @@ static void convertYieldOp(scf::YieldOp op,
 OpBuilder b(op);
 auto loop = cast<scf::ForOp>(op->getParentOp());
 auto yieldOperands = llvm::to_vector<4>(op.getOperands());
-for (auto operand : llvm::enumerate(op.getOperands())) {
+for (const auto &operand : llvm::enumerate(op.getOperands())) {
 auto it = valueMapping.find(operand.value());
 if (it == valueMapping.end())
 continue;

@@ -497,7 +497,7 @@ public:
 eltType = llvmType.cast<VectorType>().getElementType();
 Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
 int64_t insPos = 0;
-for (auto en : llvm::enumerate(maskArrayAttr)) {
+for (const auto &en : llvm::enumerate(maskArrayAttr)) {
 int64_t extPos = en.value().cast<IntegerAttr>().getInt();
 Value value = adaptor.v1();
 if (extPos >= v1Dim) {
@@ -883,7 +883,8 @@ public:
 desc.setOffset(rewriter, loc, zero);
 // Fill size and stride descriptors in memref.
-for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
+for (const auto &indexedSize :
+llvm::enumerate(targetMemRefType.getShape())) {
 int64_t index = indexedSize.index();
 auto sizeAttr =
 rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());

@@ -680,7 +680,7 @@ static void composeAffineMapAndOperands(AffineMap *map,
 for (auto *container : {&dims, &syms}) {
 bool isDim = (container == &dims);
 auto &repls = isDim ? dimReplacements : symReplacements;
-for (auto en : llvm::enumerate(*container)) {
+for (const auto &en : llvm::enumerate(*container)) {
 Value v = en.value();
 if (!v) {
 assert(isDim ? !map->isFunctionOfDim(en.index())

@@ -940,7 +940,7 @@ static LogicalResult verify(gpu::ReturnOp returnOp) {
 .attachNote(function.getLoc())
 .append("return type declared here");
-for (auto pair : llvm::enumerate(
+for (const auto &pair : llvm::enumerate(
 llvm::zip(function.getType().getResults(), returnOp.operands()))) {
 Type type;
 Value operand;

@@ -54,7 +54,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
 createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
 // Replace the leading 12 function args with the respective thread/block index
 // operations. Iterate backwards since args are erased and indices change.
-for (auto indexOp : enumerate(indexOps))
+for (const auto &indexOp : enumerate(indexOps))
 map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
 }
@@ -174,7 +174,7 @@ static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
 // Map arguments from gpu.launch region to the arguments of the gpu.func
 // operation.
 Block &entryBlock = outlinedFuncBody.front();
-for (auto operand : enumerate(operands))
+for (const auto &operand : enumerate(operands))
 map.map(operand.value(), entryBlock.getArgument(operand.index()));
 // Clone the region of the gpu.launch operation into the gpu.func operation.

@@ -89,7 +89,7 @@ static void insertCopyLoops(ImplicitLocOpBuilder &b, Value from, Value to) {
 });
 // Map the innermost loops to threads in reverse order.
-for (auto en :
+for (const auto &en :
 llvm::enumerate(llvm::reverse(llvm::makeArrayRef(ivs).take_back(
 GPUDialect::getNumWorkgroupDimensions())))) {
 Value v = en.value();

@@ -1485,7 +1485,7 @@ static void printGlobalOp(OpAsmPrinter &p, GlobalOp op) {
 // list is parsed, returns -1.
 static int parseOptionalKeywordAlternative(OpAsmParser &parser,
 ArrayRef<StringRef> keywords) {
-for (auto en : llvm::enumerate(keywords)) {
+for (const auto &en : llvm::enumerate(keywords)) {
 if (succeeded(parser.parseOptionalKeyword(en.value())))
 return en.index();
 }

@@ -103,7 +103,7 @@ LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, FuncOp f) {
 LinalgDependenceGraph::LinalgDependenceGraph(Aliases &aliases,
 ArrayRef<LinalgOp> ops)
 : aliases(aliases), linalgOps(ops.begin(), ops.end()) {
-for (auto en : llvm::enumerate(linalgOps)) {
+for (const auto &en : llvm::enumerate(linalgOps)) {
 linalgOpPositions.insert(
 std::make_pair(en.value().getOperation(), en.index()));
 }

@@ -1093,7 +1093,7 @@ static LogicalResult verify(PadTensorOp op) {
 return op.emitError("expected the block to have ") << rank << " arguments";
 // Note: the number and type of yield values are checked in the YieldOp.
-for (auto en : llvm::enumerate(block.getArgumentTypes())) {
+for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
 if (!en.value().isIndex())
 return op.emitOpError("expected block argument ")
 << (en.index() + 1) << " to be an index";
@@ -1204,7 +1204,7 @@ PadTensorOp PadTensorOp::createPadHighOp(Type type, Value source, Value pad,
 SmallVector<OpFoldResult, 4> low, high;
 auto rankedTensorType = type.cast<RankedTensorType>();
 assert(rankedTensorType.hasStaticShape());
-for (auto en : enumerate(rankedTensorType.getShape())) {
+for (const auto &en : enumerate(rankedTensorType.getShape())) {
 AffineExpr d0;
 bindDims(b.getContext(), d0);
 auto dimOp = b.createOrFold<tensor::DimOp>(loc, source, en.index());
@@ -1275,7 +1275,7 @@ SmallVector<Range> PadTensorOp::getIterationDomain(OpBuilder &b) {
 // Initialize all the ranges to {zero, one, one}. All the `ub`s are
 // overwritten.
 SmallVector<Range> loopRanges(reifiedShapes[0].size(), {zero, one, one});
-for (auto ub : enumerate(reifiedShapes[0]))
+for (const auto &ub : enumerate(reifiedShapes[0]))
 loopRanges[ub.index()].size = ub.value();
 return loopRanges;
 }
@@ -2001,7 +2001,7 @@ struct TiledLoopInputsFolder : public OpRewritePattern<linalg::TiledLoopOp> {
 // Store ids of the corresponding old and new input operands.
 SmallVector<int64_t, 2> oldInputIdToNew(tiledLoop.inputs().size(),
 kNoMatch);
-for (auto en : llvm::enumerate(
+for (const auto &en : llvm::enumerate(
 llvm::zip(tiledLoop.inputs(), tiledLoop.getRegionInputArgs()))) {
 Value in, bbArg;
 size_t index = en.index();
@@ -2215,7 +2215,7 @@ struct TiledLoopResultsFolder : public OpRewritePattern<linalg::TiledLoopOp> {
 SmallVector<int64_t, 2> oldResultIdToNew(tiledLoop.getNumResults(),
 kNoMatch);
 SmallVector<Value, 2> resultReplacement(tiledLoop.getNumResults());
-for (auto en : llvm::enumerate(
+for (const auto &en : llvm::enumerate(
 llvm::zip(tiledLoop.outputs(), tiledLoop.getRegionOutputArgs()))) {
 size_t index = en.index();
 Value out = std::get<0>(en.value());

@@ -43,7 +43,7 @@ allocateBuffersForResults(Location loc, LinalgOp linalgOp, ValueRange outputs,
 // Allocate a buffer for every tensor result.
 assert(linalgOp.getNumOutputs() == linalgOp->getNumResults());
-for (auto en : llvm::enumerate(linalgOp->getResultTypes())) {
+for (const auto &en : llvm::enumerate(linalgOp->getResultTypes())) {
 size_t resultIndex = en.index();
 Type resultType = en.value();

@@ -186,7 +186,7 @@ struct FoldUnitDimLoops : public OpRewritePattern<GenericOp> {
 DenseSet<unsigned> unitDims;
 SmallVector<unsigned, 4> unitDimsReductionLoops;
 ArrayAttr iteratorTypes = genericOp.iterator_types();
-for (auto expr : enumerate(invertedMap.getResults())) {
+for (const auto &expr : enumerate(invertedMap.getResults())) {
 if (AffineDimExpr dimExpr = expr.value().dyn_cast<AffineDimExpr>())
 if (dims[dimExpr.getPosition()] == 1)
 unitDims.insert(expr.index());
@@ -205,7 +205,7 @@ struct FoldUnitDimLoops : public OpRewritePattern<GenericOp> {
 // Compute the iterator types of the modified op by dropping the one-trip
 // count loops.
 SmallVector<Attribute, 4> newIteratorTypes;
-for (auto attr : llvm::enumerate(iteratorTypes)) {
+for (const auto &attr : llvm::enumerate(iteratorTypes)) {
 if (!unitDims.count(attr.index()))
 newIteratorTypes.push_back(attr.value());
 }
@@ -439,7 +439,7 @@ struct ReplaceUnitExtents : public OpRewritePattern<GenericOp> {
 // If any result tensor has a modified shape, then add reshape to recover
 // the original shape.
 SmallVector<Value, 4> resultReplacements;
-for (auto result : llvm::enumerate(replacementOp.getResults())) {
+for (const auto &result : llvm::enumerate(replacementOp.getResults())) {
 unsigned index = result.index() + replacementOp.getNumInputs();
 auto origResultType = genericOp.getResult(result.index()).getType();
@@ -465,7 +465,7 @@ static Optional<SmallVector<ReassociationIndices>>
 getReassociationMapForFoldingUnitDims(ArrayRef<OpFoldResult> mixedSizes) {
 SmallVector<ReassociationIndices> reassociation;
 ReassociationIndices curr;
-for (auto it : llvm::enumerate(mixedSizes)) {
+for (const auto &it : llvm::enumerate(mixedSizes)) {
 auto dim = it.index();
 auto size = it.value();
 curr.push_back(dim);

@@ -565,7 +565,7 @@ LogicalResult ExpansionInfo::compute(LinalgOp linalgOp,
 // dimension of the original op.
 SmallVector<unsigned> numExpandedDims(fusedIndexMap.getNumDims(), 1);
 expandedShapeMap.resize(fusedIndexMap.getNumDims());
-for (auto resultExpr : llvm::enumerate(fusedIndexMap.getResults())) {
+for (const auto &resultExpr : llvm::enumerate(fusedIndexMap.getResults())) {
 unsigned pos = resultExpr.value().cast<AffineDimExpr>().getPosition();
 AffineMap foldedDims = reassociationMaps[resultExpr.index()];
 numExpandedDims[pos] = foldedDims.getNumResults();
@@ -581,7 +581,7 @@ LogicalResult ExpansionInfo::compute(LinalgOp linalgOp,
 // Compute reassociation map from the original op to the expanded op.
 unsigned sum = 0;
 reassociation.reserve(fusedIndexMap.getNumDims());
-for (auto numFoldedDim : llvm::enumerate(numExpandedDims)) {
+for (const auto &numFoldedDim : llvm::enumerate(numExpandedDims)) {
 auto seq = llvm::seq<int64_t>(sum, sum + numFoldedDim.value());
 reassociation.emplace_back(seq.begin(), seq.end());
 sum += numFoldedDim.value();
@@ -861,7 +861,7 @@ struct FoldProducerReshapeOpByLinearization
 if (!genericOp.hasTensorSemantics())
 return failure();
 SmallVector<OpOperand *> inputOperands = genericOp.getInputOperands();
-for (auto en : llvm::enumerate(inputOperands)) {
+for (const auto &en : llvm::enumerate(inputOperands)) {
 auto reshapeOp = en.value()->get().getDefiningOp<TensorReshapeOp>();
 if (!reshapeOp)
 continue;
@@ -976,7 +976,7 @@ struct PushExpandingReshape : public OpRewritePattern<GenericOp> {
 // 1. Look for tensor_expand_shape operands and figure out save the
 // dimensions merged.
 SmallVector<OpOperand *> inputOperands = genericOp.getInputOperands();
-for (auto en : llvm::enumerate(inputOperands)) {
+for (const auto &en : llvm::enumerate(inputOperands)) {
 auto reshapeOp =
 en.value()->get().template getDefiningOp<tensor::ExpandShapeOp>();
 if (!reshapeOp)
@@ -1010,7 +1010,7 @@ struct PushExpandingReshape : public OpRewritePattern<GenericOp> {
 // 2. Verify that we can merge the dimensions in the linalg and that we
 // don't need to create new reshapes operands. Inserting new reshape
 // operands would defeat the purpose of the transformation.
-for (auto en : llvm::enumerate(inputOperands)) {
+for (const auto &en : llvm::enumerate(inputOperands)) {
 if (en.value()->get() == newOperands[en.index()]) {
 AffineMap map = genericOp.getTiedIndexingMap(en.value());
 for (unsigned i : llvm::seq(unsigned(0), map.getNumResults())) {
@@ -1060,7 +1060,7 @@ struct PushExpandingReshape : public OpRewritePattern<GenericOp> {
 newOp.region().begin());
 // 6. Reshape the so that the type matches the uses.
 SmallVector<Value> newResults;
-for (auto result : llvm::enumerate(newOp->getResults())) {
+for (const auto &result : llvm::enumerate(newOp->getResults())) {
 newResults.push_back(rewriter.create<tensor::ExpandShapeOp>(
 genericOp->getLoc(), genericOp.getOutputTensorTypes()[result.index()],
 result.value(), reassociation));
@@ -1407,7 +1407,7 @@ public:
 // All inputs should be constants.
 int numInputs = genericOp.getNumInputs();
 SmallVector<DenseIntOrFPElementsAttr> inputValues(numInputs);
-for (auto operand : llvm::enumerate(genericOp.getInputOperands())) {
+for (const auto &operand : llvm::enumerate(genericOp.getInputOperands())) {
 if (!matchPattern(operand.value()->get(),
 m_Constant(&inputValues[operand.index()])))
 return failure();
@@ -1712,7 +1712,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
 continue;
 modifiedOutput = true;
 SmallVector<Value> dynamicDims;
-for (auto dim : llvm::enumerate(operandType.getShape())) {
+for (const auto &dim : llvm::enumerate(operandType.getShape())) {
 if (dim.value() != ShapedType::kDynamicSize)
 continue;
 dynamicDims.push_back(rewriter.createOrFold<tensor::DimOp>(

@@ -87,7 +87,7 @@ getShapeDefiningLoopRange(LinalgOp op, unsigned loopDepth,
 LLVM_DEBUG(llvm::dbgs()
 << "getShapeDefiningLoopRange map: " << map << "\n");
 SmallVector<Value, 8> shapeRanges(map.getNumResults(), nullptr);
-for (auto en : llvm::enumerate(map.getResults())) {
+for (const auto &en : llvm::enumerate(map.getResults())) {
 auto dimExpr = en.value().dyn_cast<AffineDimExpr>();
 if (!dimExpr)
 continue;
@@ -250,7 +250,7 @@ static LinalgOp fuse(OpBuilder &b, LinalgOp producerOp, AffineMap producerMap,
 LLVM_DEBUG(llvm::dbgs() << "Producer map: " << producerMap << "\n");
 DenseMap<unsigned, Range> fusedLoopsAndRanges;
 Value shapedOperand = consumerOpOperand.get();
-for (auto en : llvm::enumerate(producerMap.getResults())) {
+for (const auto &en : llvm::enumerate(producerMap.getResults())) {
 unsigned posInProducerLoop = en.value().cast<AffineDimExpr>().getPosition();
 fusedLoopsAndRanges[posInProducerLoop] = getRangeFromOperandShape(
 b, consumerOpOperand.getOwner()->getLoc(), shapedOperand, en.index());
@@ -521,7 +521,7 @@ mlir::linalg::fuseProducerOfTensor(OpBuilder &b, OpResult producerOpResult,
 static AffineMap pruneReductionDimsFromMap(ArrayRef<Attribute> iteratorTypes,
 AffineMap map) {
 llvm::SmallDenseSet<unsigned> projectedDims;
-for (auto attr : llvm::enumerate(iteratorTypes)) {
+for (const auto &attr : llvm::enumerate(iteratorTypes)) {
 if (!isParallelIterator(attr.value()))
 projectedDims.insert(attr.index());
 }
@@ -810,7 +810,7 @@ fuseOperations(OpBuilder &b, LinalgOp rootOp, TiledLinalgOp tiledLinalgOp,
 SmallVector<LinalgOp, 1> fusedOps(fusionCandidates.size());
 DenseMap<Operation *, LinalgOp> origOpToFusedOp;
 origOpToFusedOp[rootOp.getOperation()] = tiledOp;
-for (auto candidate : enumerate(llvm::reverse(fusionCandidates))) {
+for (const auto &candidate : enumerate(llvm::reverse(fusionCandidates))) {
 LinalgOp origOp = candidate.value();
 LinalgOp fusedOp = fuse(b, origOp, fusedLoopsAndRanges);
 origOpToFusedOp[origOp.getOperation()] = fusedOp;

@@ -42,7 +42,7 @@ static SmallVector<int64_t> getTiledSliceDims(OpOperand *consumerOperand,
 // Search the slice dimensions tiled by a tile loop dimension.
 DenseSet<int64_t> tiledSliceDimIndices;
-for (auto en : enumerate(indexingMap.getResults())) {
+for (const auto &en : enumerate(indexingMap.getResults())) {
 for (auto tiledLoopDim : tiledLoopDims) {
 if (en.value().isFunctionOfDim(tiledLoopDim))
 tiledSliceDimIndices.insert(en.index());
@@ -304,7 +304,7 @@ LogicalResult TileLoopNest::tileRootOp(OpBuilder &b,
 // Update the root operation and append the loops and tile loop dimensions.
 rootOp = tiledRootOp->op;
 tileLoopOps.append(tiledRootOp->loops.begin(), tiledRootOp->loops.end());
-for (auto en : enumerate(tileSizes)) {
+for (const auto &en : enumerate(tileSizes)) {
 // Copy only the tiled loop dimensions with non-zero tile size.
 if (en.value() == 0)
 continue;

@@ -346,7 +346,7 @@ void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
 changed = false;
 func.walk([&](scf::ForOp forOp) {
 Operation *yield = forOp.getBody()->getTerminator();
-for (auto it : llvm::enumerate(forOp.getRegionIterArgs())) {
+for (const auto &it : llvm::enumerate(forOp.getRegionIterArgs())) {
 OpOperand &ret = yield->getOpOperand(it.index());
 HoistableWrite write =
 getLoopInvariantTransferWriteOpDefining(forOp, ret);

@@ -277,7 +277,7 @@ struct TiledLoopToSCFPattern : public OpRewritePattern<TiledLoopOp> {
 // Collect loop control parameters for parallel and sequential dimensions.
 SmallVector<Value, 3> seqLBs, seqUBs, seqSteps, seqIVs;
 SmallVector<Value, 3> parLBs, parUBs, parSteps, parIVs;
-for (auto en : llvm::enumerate(
+for (const auto &en : llvm::enumerate(
 llvm::zip(tiledLoop.lowerBound(), tiledLoop.upperBound(),
 tiledLoop.step(), tiledLoop.getInductionVars()))) {
 Value lb, ub, step, iv;

@@ -87,7 +87,7 @@ defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
 auto one = b.createOrFold<arith::ConstantIndexOp>(1);
 Value allocSize = one;
-for (auto size : llvm::enumerate(boundingSubViewSize))
+for (const auto &size : llvm::enumerate(boundingSubViewSize))
 allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
 Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
 layout, alignment);
@@ -219,7 +219,7 @@ FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
 SmallVector<OpFoldResult> partialSizes;
 fullSizes.reserve(rank);
 partialSizes.reserve(rank);
-for (auto en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
+for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
 auto rangeValue = en.value();
 // Try to extract a tight constant.
 LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");

@@ -181,7 +181,7 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
 b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);
 SmallVector<Attribute, 4> iteratorTypes;
-for (auto attr :
+for (const auto &attr :
 enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
 if (loopIndexToRangeIndex.count(attr.index()))
 iteratorTypes.push_back(attr.value());

@@ -194,7 +194,7 @@ static LogicalResult padOperandToSmallestStaticBoundingBox(
 SmallVector<int64_t> staticSizes;
 staticSizes.reserve(shape.size());
 auto shapedOp = cast<OffsetSizeAndStrideOpInterface>(sliceOp.getOperation());
-for (auto en : enumerate(shapedOp.getMixedSizes())) {
+for (const auto &en : enumerate(shapedOp.getMixedSizes())) {
 // Skip dropped dimensions.
 if (droppedDims.contains(en.index()))
 continue;
@@ -269,7 +269,7 @@ linalg::rewriteAsPaddedOp(OpBuilder &b, LinalgOp opToPad,
 // linalg op around because it uses the dims of the original results.
 SmallVector<Value> paddedSubviewResults;
 paddedSubviewResults.reserve(opToPad->getNumResults());
-for (auto en : llvm::enumerate(paddedOp->getResults())) {
+for (const auto &en : llvm::enumerate(paddedOp->getResults())) {
 Value paddedResult = en.value();
 int64_t resultNumber = en.index();
 int64_t rank = paddedResult.getType().cast<RankedTensorType>().getRank();
@@ -443,7 +443,7 @@ LogicalResult mlir::linalg::LinalgBaseTileAndFusePattern::matchAndRewrite(
 // Tile the unfused loops;
 SmallVector<Value, 4> unfusedLoopTileSizes;
 Value zero = rewriter.create<arith::ConstantIndexOp>(op->getLoc(), 0);
-for (auto tileSize : enumerate(tileSizes)) {
+for (const auto &tileSize : enumerate(tileSizes)) {
 if (tiledAndFusedOps->fusedLoopDims.count(tileSize.index()))
 unfusedLoopTileSizes.push_back(zero);
 else
@@ -524,7 +524,7 @@ LogicalResult mlir::linalg::LinalgPaddingPattern::matchAndRewrite(
 }
 // Hoist the padding.
-for (auto en : enumerate(depths)) {
+for (const auto &en : enumerate(depths)) {
 OpOperand &opOperand = paddedOp->getOpOperand(en.index());
 auto padTensorOp = opOperand.get().getDefiningOp<PadTensorOp>();
 if (!padTensorOp || en.value() == 0)

@@ -249,7 +249,7 @@ vectorizeLinalgYield(OpBuilder &b, Operation *op,
 auto yieldOp = dyn_cast<linalg::YieldOp>(op);
 if (!yieldOp)
 return VectorizationResult{VectorizationStatus::Failure, nullptr};
-for (auto outputs : llvm::enumerate(yieldOp.values())) {
+for (const auto &outputs : llvm::enumerate(yieldOp.values())) {
 // TODO: Scan for an opportunity for reuse.
 // TODO: use a map.
 Value vectorValue = bvm.lookup(outputs.value());

@@ -169,7 +169,7 @@ Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim) {
 SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b) {
 SmallVector<Value, 4> dynOperands;
 auto shapedType = val.getType().cast<ShapedType>();
-for (auto dim : llvm::enumerate(shapedType.getShape())) {
+for (const auto &dim : llvm::enumerate(shapedType.getShape())) {
 if (dim.value() == ShapedType::kDynamicSize)
 dynOperands.push_back(createOrFoldDimOp(b, loc, val, dim.index()));
 }
@@ -310,7 +310,7 @@ tensor::ExtractSliceOp makeComposedExtractSliceOp(
 SmallVector<OpFoldResult> foldedOffsets(offsets.begin(), offsets.end());
 AffineExpr dim1, dim2;
 bindDims(b.getContext(), dim1, dim2);
-for (auto en : enumerate(producerOp.getMixedOffsets())) {
+for (const auto &en : enumerate(producerOp.getMixedOffsets())) {
 SmallVector<Value> offsetValues = {
 getValueOrCreateConstantIndexOp(b, loc, foldedOffsets[en.index()]),
 getValueOrCreateConstantIndexOp(b, loc, en.value())};
@@ -403,7 +403,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
 if (distributionOptions.hasValue()) {
 // Collect loop ranges for parallel dimensions.
 SmallVector<Range, 2> parallelLoopRanges;
-for (auto iteratorType : enumerate(iteratorTypes))
+for (const auto &iteratorType : enumerate(iteratorTypes))
 if (isParallelIterator(iteratorType.value()))
 parallelLoopRanges.push_back(loopRanges[iteratorType.index()]);
@@ -435,7 +435,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
 // Filter out scf.for loops that were created out of parallel dimensions.
 SmallVector<scf::ForOp, 4> loops;
-for (auto iteratorType : enumerate(iteratorTypes))
+for (const auto &iteratorType : enumerate(iteratorTypes))
 if (isParallelIterator(iteratorType.value()))
 loops.push_back(loopNest.loops[iteratorType.index()]);
@@ -677,7 +677,7 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
 distributionMethod.assign(distributionOptions->distributionMethod.begin(),
 distributionOptions->distributionMethod.end());
 SmallVector<Range, 2> parallelLoopRanges;
-for (auto iteratorType : enumerate(iteratorTypes)) {
+for (const auto &iteratorType : enumerate(iteratorTypes)) {
 if (isParallelIterator(iteratorType.value()))
 parallelLoopRanges.push_back(loopRanges[iteratorType.index()]);
 }
@@ -686,7 +686,7 @@ void GenerateLoopNest<scf::ParallelOp>::doit(
 SmallVector<ProcInfo, 2> procInfo =
 options.procInfo(b, loc, parallelLoopRanges);
 unsigned index = 0;
-for (auto iteratorType : enumerate(iteratorTypes)) {
+for (const auto &iteratorType : enumerate(iteratorTypes)) {
 if (index >= procInfo.size())
 break;
 if (isParallelIterator(iteratorType.value())) {

@@ -395,7 +395,7 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
 };
 if (!checkCompatible(aOffset, bOffset))
 return false;
-for (auto aStride : enumerate(aStrides))
+for (const auto &aStride : enumerate(aStrides))
 if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
 return false;
 }
@@ -515,7 +515,7 @@ computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
 if (originalType.getRank() == reducedType.getRank())
 return unusedDims;
-for (auto dim : llvm::enumerate(sizes))
+for (const auto &dim : llvm::enumerate(sizes))
 if (auto attr = dim.value().dyn_cast<Attribute>())
 if (attr.cast<IntegerAttr>().getInt() == 1)
 unusedDims.insert(dim.index());
@@ -1851,7 +1851,7 @@ static MemRefType getCanonicalSubViewResultType(
 if (!unusedDims)
 return nullptr;
 SmallVector<int64_t> shape;
-for (auto sizes : llvm::enumerate(nonRankReducedType.getShape())) {
+for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
 if (unusedDims->count(sizes.index()))
 continue;
 shape.push_back(sizes.value());
@@ -1903,7 +1903,7 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {
 // Check all size values are static and matches the (static) source shape.
 ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
-for (auto size : llvm::enumerate(mixedSizes)) {
+for (const auto &size : llvm::enumerate(mixedSizes)) {
 Optional<int64_t> intValue = getConstantIntValue(size.value());
 if (!intValue || intValue.getValue() != sourceShape[size.index()])
 return false;
@@ -2040,7 +2040,7 @@ static MemRefType inferTransposeResultType(MemRefType memRefType,
 auto originalSizes = memRefType.getShape();
 // Compute permuted sizes.
 SmallVector<int64_t, 4> sizes(rank, 0);
-for (auto en : llvm::enumerate(permutationMap.getResults()))
+for (const auto &en : llvm::enumerate(permutationMap.getResults()))
 sizes[en.index()] =
 originalSizes[en.value().cast<AffineDimExpr>().getPosition()];

@@ -249,7 +249,7 @@ verifyScheduleModifiers(OpAsmParser &parser,
 SmallVectorImpl<SmallString<12>> &modifiers) {
 if (modifiers.size() > 2)
 return parser.emitError(parser.getNameLoc()) << " unexpected modifier(s)";
-for (auto mod : modifiers) {
+for (const auto &mod : modifiers) {
 // Translate the string. If it has no value, then it was not a valid
 // modifier!
 auto symbol = symbolizeScheduleModifier(mod);

@@ -198,7 +198,7 @@ static LogicalResult verifyResultTypesAreInferrable(OperationOp op,
 return success();
 // Otherwise, make sure each of the types can be inferred.
-for (auto it : llvm::enumerate(resultTypes)) {
+for (const auto &it : llvm::enumerate(resultTypes)) {
 Operation *resultTypeOp = it.value().getDefiningOp();
 assert(resultTypeOp && "expected valid result type operation");

@@ -1247,7 +1247,7 @@ struct RemoveUnusedResults : public OpRewritePattern<IfOp> {
 // Replace the operation by the new one.
 SmallVector<Value, 4> repResults(op.getNumResults());
-for (auto en : llvm::enumerate(usedResults))
+for (const auto &en : llvm::enumerate(usedResults))
 repResults[en.value().getResultNumber()] = newOp.getResult(en.index());
 rewriter.replaceOp(op, repResults);
 return success();
@@ -1296,7 +1296,8 @@ struct ConvertTrivialIfToSelect : public OpRewritePattern<IfOp> {
 SmallVector<Value> results(op->getNumResults());
 assert(thenYieldArgs.size() == results.size());
 assert(elseYieldArgs.size() == results.size());
-for (auto it : llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) {
+for (const auto &it :
+llvm::enumerate(llvm::zip(thenYieldArgs, elseYieldArgs))) {
 Value trueVal = std::get<0>(it.value());
 Value falseVal = std::get<1>(it.value());
 if (trueVal == falseVal)
@@ -1564,7 +1565,7 @@ struct CombineIfs : public OpRewritePattern<IfOp> {
 SmallVector<Value> prevValues;
 SmallVector<Value> nextValues;
-for (auto pair : llvm::enumerate(combinedIf.getResults())) {
+for (const auto &pair : llvm::enumerate(combinedIf.getResults())) {
 if (pair.index() < prevIf.getNumResults())
 prevValues.push_back(pair.value());
 else
@@ -2368,7 +2369,7 @@ struct WhileUnusedResult : public OpRewritePattern<WhileOp> {
 SmallVector<Type> newResultTypes;
 SmallVector<Value> newTermArgs;
 bool needUpdate = false;
-for (auto it :
+for (const auto &it :
 llvm::enumerate(llvm::zip(op.getResults(), afterArgs, termArgs))) {
 auto i = static_cast<unsigned>(it.index());
 Value result = std::get<0>(it.value());
@@ -2403,7 +2404,7 @@ struct WhileUnusedResult : public OpRewritePattern<WhileOp> {
 // null).
 SmallVector<Value> newResults(op.getNumResults());
 SmallVector<Value> newAfterBlockArgs(op.getNumResults());
-for (auto it : llvm::enumerate(newResultsIndices)) {
+for (const auto &it : llvm::enumerate(newResultsIndices)) {
 newResults[it.value()] = newWhile.getResult(it.index());
 newAfterBlockArgs[it.value()] = newAfterBlock.getArgument(it.index());
 }

@@ -69,7 +69,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern<ForOp> {
 // Rewrite uses of the for-loop block arguments to the new while-loop
 // "after" arguments
-for (auto barg : enumerate(forOp.getBody(0)->getArguments()))
+for (const auto &barg : enumerate(forOp.getBody(0)->getArguments()))
 barg.value().replaceAllUsesWith(afterBlock->getArgument(barg.index()));
 // Inline for-loop body operations into 'after' region.
@@ -87,7 +87,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern<ForOp> {
 // an extra value (the induction variable escapes the loop through being
 // carried in the set of iterargs). Instead, rewrite uses of the forOp
 // results.
-for (auto arg : llvm::enumerate(forOp.getResults()))
+for (const auto &arg : llvm::enumerate(forOp.getResults()))
 arg.value().replaceAllUsesWith(whileOp.getResult(arg.index() + 1));
 rewriter.eraseOp(forOp);

@@ -198,7 +198,7 @@ scf::ForOp LoopPipelinerInternal::createKernelLoop(
 llvm::SmallVector<Value> newLoopArg;
 // For existing loop argument initialize them with the right version from the
 // prologue.
-for (auto retVal :
+for (const auto &retVal :
 llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) {
 Operation *def = retVal.value().getDefiningOp();
 assert(def && "Only support loop carried dependencies of distance 1");
@@ -245,7 +245,7 @@ void LoopPipelinerInternal::createKernel(
 rewriter.setInsertionPoint(newForOp.getBody(), newForOp.getBody()->begin());
 BlockAndValueMapping mapping;
 mapping.map(forOp.getInductionVar(), newForOp.getInductionVar());
-for (auto arg : llvm::enumerate(forOp.getRegionIterArgs())) {
+for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) {
 mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]);
 }
 for (Operation *op : opOrder) {
@@ -325,7 +325,7 @@ void LoopPipelinerInternal::createKernel(
 yieldOperands.push_back(mapping.lookupOrDefault(it.first));
 }
 // Map the yield operand to the forOp returned value.
-for (auto retVal :
+for (const auto &retVal :
 llvm::enumerate(forOp.getBody()->getTerminator()->getOperands())) {
 Operation *def = retVal.value().getDefiningOp();
 assert(def && "Only support loop carried dependencies of distance 1");

@@ -160,8 +160,8 @@ mlir::scf::tileParallelLoop(ParallelOp op, ArrayRef<int64_t> tileSizes,
 ifInbound.getThenRegion().takeBody(op.getRegion());
 Block &thenBlock = ifInbound.getThenRegion().front();
 b.setInsertionPointToStart(innerLoop.getBody());
-for (auto ivs : llvm::enumerate(llvm::zip(innerLoop.getInductionVars(),
-outerLoop.getInductionVars()))) {
+for (const auto &ivs : llvm::enumerate(llvm::zip(
+innerLoop.getInductionVars(), outerLoop.getInductionVars()))) {
 auto newIndex = b.create<arith::AddIOp>(
 op.getLoc(), std::get<0>(ivs.value()), std::get<1>(ivs.value()));
 thenBlock.getArgument(ivs.index())

@@ -182,7 +182,7 @@ LogicalResult ProcessInterfaceVarABI::matchAndRewrite(
 auto indexType = typeConverter.getIndexType();
 auto attrName = spirv::getInterfaceVarABIAttrName();
-for (auto argType : llvm::enumerate(funcOp.getType().getInputs())) {
+for (const auto &argType : llvm::enumerate(funcOp.getType().getInputs())) {
 auto abiInfo = funcOp.getArgAttrOfType<spirv::InterfaceVarABIAttr>(
 argType.index(), attrName);
 if (!abiInfo) {

@@ -235,7 +235,7 @@ getTypeNumBytes(const SPIRVTypeConverter::Options &options, Type type) {
 return llvm::None;
 int64_t memrefSize = -1;
-for (auto shape : enumerate(dims))
+for (const auto &shape : enumerate(dims))
 memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
 return (offset + memrefSize) * elementSize.getValue();
@@ -557,7 +557,7 @@ FuncOpConversion::matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
 return failure();
 TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
-for (auto argType : enumerate(fnType.getInputs())) {
+for (const auto &argType : enumerate(fnType.getInputs())) {
 auto convertedType = getTypeConverter()->convertType(argType.value());
 if (!convertedType)
 return failure();
@@ -778,7 +778,7 @@ Value mlir::spirv::linearizeIndex(ValueRange indices, ArrayRef<int64_t> strides,
 Value linearizedIndex = builder.create<spirv::ConstantOp>(
 loc, integerType, IntegerAttr::get(integerType, offset));
-for (auto index : llvm::enumerate(indices)) {
+for (const auto &index : llvm::enumerate(indices)) {
 Value strideVal = builder.create<spirv::ConstantOp>(
 loc, integerType,
 IntegerAttr::get(integerType, strides[index.index()]));

@@ -1669,7 +1669,7 @@ static LogicalResult verify(ReduceOp op) {
 "ReduceOp operates on an extent tensor");
 }
-for (auto type : llvm::enumerate(op.getInitVals()))
+for (const auto &type : llvm::enumerate(op.getInitVals()))
 if (block.getArgument(type.index() + 2).getType() != type.value().getType())
 return op.emitOpError()
 << "type mismatch between argument " << type.index() + 2

@@ -1017,7 +1017,7 @@ llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
 ArrayRef<int64_t> resultShape = getType().getShape();
 SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
 unsigned shapePos = 0;
-for (auto size : enumerate(mixedSizes)) {
+for (const auto &size : enumerate(mixedSizes)) {
 Optional<int64_t> sizeVal = getConstantIntValue(size.value());
 // If the size is not 1, or if the current matched dimension of the result
 // is the same static shape as the size value (which is 1), then the
@@ -1039,7 +1039,7 @@ LogicalResult ExtractSliceOp::reifyResultShapes(
 SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
 llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
 Location loc = getLoc();
-for (auto size : enumerate(mixedSizes)) {
+for (const auto &size : enumerate(mixedSizes)) {
 if (droppedDims.count(size.index()))
 continue;
 if (auto attr = size.value().dyn_cast<Attribute>()) {

@@ -126,7 +126,7 @@ public:
 // 1. Separate reduction and parallel dims.
 SmallVector<int64_t, 4> parallelDims, parallelShapes;
 SmallVector<int64_t, 4> reductionDims, reductionShapes;
-for (auto it : llvm::enumerate(reductionMask)) {
+for (const auto &it : llvm::enumerate(reductionMask)) {
 int64_t i = it.index();
 bool isReduction = it.value();
 if (isReduction) {

@@ -270,7 +270,7 @@ void vector::MultiDimReductionOp::build(OpBuilder &builder,
 result.addTypes(targetType);
 SmallVector<int64_t> reductionDims;
-for (auto en : llvm::enumerate(reductionMask))
+for (const auto &en : llvm::enumerate(reductionMask))
 if (en.value())
 reductionDims.push_back(en.index());
 result.addAttribute(getReductionDimsAttrName(),
@@ -615,7 +615,7 @@ static LogicalResult verify(ContractionOp op) {
 // that the number of map outputs equals the rank of its associated
 // vector operand.
 unsigned numIterators = op.iterator_types().getValue().size();
-for (auto it : llvm::enumerate(op.indexing_maps())) {
+for (const auto &it : llvm::enumerate(op.indexing_maps())) {
 auto index = it.index();
 auto map = it.value().cast<AffineMapAttr>().getValue();
 if (map.getNumSymbols() != 0)
@@ -695,7 +695,7 @@ static std::vector<std::pair<int64_t, int64_t>>
 getDimMap(ArrayRef<AffineMap> indexingMaps, ArrayAttr iteratorTypes,
 StringRef targetIteratorTypeName, MLIRContext *context) {
 std::vector<std::pair<int64_t, int64_t>> dimMap;
-for (auto it : llvm::enumerate(iteratorTypes)) {
+for (const auto &it : llvm::enumerate(iteratorTypes)) {
 auto iteratorTypeName = it.value().cast<StringAttr>().getValue();
 if (iteratorTypeName != targetIteratorTypeName)
 continue;
@@ -715,7 +715,7 @@ void ContractionOp::getIterationBounds(
 auto resVectorType = getResultType().dyn_cast<VectorType>();
 SmallVector<AffineMap, 4> indexingMaps(getIndexingMaps());
 SmallVector<int64_t, 2> iterationShape;
-for (auto it : llvm::enumerate(iterator_types())) {
+for (const auto &it : llvm::enumerate(iterator_types())) {
 // Search lhs/rhs map results for 'targetExpr'.
 auto targetExpr = getAffineDimExpr(it.index(), getContext());
 auto iteratorTypeName = it.value().cast<StringAttr>().getValue();
@@ -738,7 +738,7 @@ void ContractionOp::getIterationIndexMap(
 std::vector<DenseMap<int64_t, int64_t>> &iterationIndexMap) {
 unsigned numMaps = indexing_maps().getValue().size();
 iterationIndexMap.resize(numMaps);
-for (auto it : llvm::enumerate(indexing_maps())) {
+for (const auto &it : llvm::enumerate(indexing_maps())) {
 auto index = it.index();
 auto map = it.value().cast<AffineMapAttr>().getValue();
 for (unsigned i = 0, e = map.getNumResults(); i < e; ++i) {
@@ -933,7 +933,7 @@ static LogicalResult verify(vector::ExtractOp op) {
 if (positionAttr.size() > static_cast<unsigned>(op.getVectorType().getRank()))
 return op.emitOpError(
 "expected position attribute of rank smaller than vector rank");
-for (auto en : llvm::enumerate(positionAttr)) {
+for (const auto &en : llvm::enumerate(positionAttr)) {
 auto attr = en.value().dyn_cast<IntegerAttr>();
 if (!attr || attr.getInt() < 0 ||
 attr.getInt() >= op.getVectorType().getDimSize(en.index()))
@@ -1511,7 +1511,7 @@ static LogicalResult verify(ShuffleOp op) {
 return op.emitOpError("mask length mismatch");
 // Verify all indices.
 int64_t indexSize = v1Type.getDimSize(0) + v2Type.getDimSize(0);
-for (auto en : llvm::enumerate(maskAttr)) {
+for (const auto &en : llvm::enumerate(maskAttr)) {
 auto attr = en.value().dyn_cast<IntegerAttr>();
 if (!attr || attr.getInt() < 0 || attr.getInt() >= indexSize)
 return op.emitOpError("mask index #")
@@ -1621,7 +1621,7 @@ static LogicalResult verify(InsertOp op) {
 (positionAttr.size() != static_cast<unsigned>(destVectorType.getRank())))
 return op.emitOpError(
 "expected position attribute rank to match the dest vector rank");
-for (auto en : llvm::enumerate(positionAttr)) {
+for (const auto &en : llvm::enumerate(positionAttr)) {
 auto attr = en.value().dyn_cast<IntegerAttr>();
 if (!attr || attr.getInt() < 0 ||
 attr.getInt() >= destVectorType.getDimSize(en.index()))
@@ -2822,7 +2822,7 @@ public:
 newIndices.push_back(getValueOrCreateConstantIndexOp(
 rewriter, extractOp.getLoc(), offset));
 }
-for (auto it : llvm::enumerate(xferOp.indices())) {
+for (const auto &it : llvm::enumerate(xferOp.indices())) {
 OpFoldResult offset =
 extractOp.getMixedOffsets()[it.index() + rankReduced];
 newIndices.push_back(rewriter.create<arith::AddIOp>(
@@ -3913,7 +3913,7 @@ static LogicalResult verify(vector::TransposeOp op) {
 if (rank != size)
 return op.emitOpError("transposition length mismatch: ") << size;
 SmallVector<bool, 8> seen(rank, false);
-for (auto ta : llvm::enumerate(transpAttr)) {
+for (const auto &ta : llvm::enumerate(transpAttr)) {
 int64_t i = ta.value().cast<IntegerAttr>().getInt();
 if (i < 0 || i >= rank)
 return op.emitOpError("transposition index out of range: ") << i;
@@ -4004,7 +4004,7 @@ static LogicalResult verify(ConstantMaskOp &op) {
 // result dimension size.
 auto resultShape = resultType.getShape();
 SmallVector<int64_t, 4> maskDimSizes;
-for (auto it : llvm::enumerate(op.mask_dim_sizes())) {
+for (const auto &it : llvm::enumerate(op.mask_dim_sizes())) {
 int64_t attrValue = it.value().cast<IntegerAttr>().getInt();
 if (attrValue < 0 || attrValue > resultShape[it.index()])
 return op.emitOpError(

@@ -79,7 +79,7 @@ struct TransferReadPermutationLowering
 // Apply the reverse transpose to deduce the type of the transfer_read.
 ArrayRef<int64_t> originalShape = op.getVectorType().getShape();
 SmallVector<int64_t> newVectorShape(originalShape.size());
-for (auto pos : llvm::enumerate(permutation)) {
+for (const auto &pos : llvm::enumerate(permutation)) {
 newVectorShape[pos.value()] = originalShape[pos.index()];
 }

@@ -53,7 +53,7 @@ static Optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
 static SmallVector<Attribute, 4> adjustIter(ArrayAttr iteratorTypes,
 int64_t index) {
 SmallVector<Attribute, 4> results;
-for (auto it : llvm::enumerate(iteratorTypes)) {
+for (const auto &it : llvm::enumerate(iteratorTypes)) {
 int64_t idx = it.index();
 if (idx == index)
 continue;
@@ -871,7 +871,7 @@ struct MultiReduceToContract
 auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
 SmallVector<AffineExpr> exprs;
 SmallVector<StringRef> iteratorTypes;
-for (auto isReduceDim : llvm::enumerate(reductionMask)) {
+for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
 if (!isReduceDim.value()) {
 iteratorTypes.push_back(getParallelIteratorTypeName());
 exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
@@ -997,7 +997,7 @@ struct CombineContractBroadcast
 broadcast.getVectorType().getRank() - srcType.getRank();
 bool innerDimBroadcast = false;
 SmallVector<AffineExpr> originalDims;
-for (auto dim : llvm::enumerate(srcType.getShape())) {
+for (const auto &dim : llvm::enumerate(srcType.getShape())) {
 if (dim.value() !=
 broadcast.getVectorType().getDimSize(rankDiff + dim.index())) {
 innerDimBroadcast = true;

@@ -52,7 +52,7 @@ sliceTransferIndices(int64_t index, ArrayRef<int64_t> originalShape,
 getVectorOffset(originalShape, targetShape, index);
 // Compute 'sliceIndices' by adding 'sliceOffsets[i]' to 'indices[i]'.
 SmallVector<Value> slicedIndices(indices.begin(), indices.end());
-for (auto dim : llvm::enumerate(permutationMap.getResults())) {
+for (const auto &dim : llvm::enumerate(permutationMap.getResults())) {
 if (isBroadcast(dim.value()))
 continue;
 unsigned pos = dim.value().cast<AffineDimExpr>().getPosition();
@@ -429,7 +429,7 @@ struct ContractExtractPattern : public OpRewritePattern<vector::ExtractMapOp> {
 for (unsigned i : llvm::seq(unsigned(0), affineMap.getNumResults()))
 map[affineMap.getDimPosition(i)] = extract.getResultType().getDimSize(i);
 SmallVector<Value, 4> extractOperands;
-for (auto it : llvm::enumerate(contract.getIndexingMaps())) {
+for (const auto &it : llvm::enumerate(contract.getIndexingMaps())) {
 // For each operands calculate the new vector type after distribution.
 Value operand = contract->getOperand(it.index());
 auto vecType = operand.getType().cast<VectorType>();

@@ -1022,7 +1022,7 @@ static AffineExpr getSemiAffineExprFromFlatForm(ArrayRef<int64_t> flatExprs,
 // as lhs/rhs, and store the indices, constant coefficient corresponding to
 // the indices in `coefficients` map, and affine expression corresponding to
 // in indices in `indexToExprMap` map.
-for (auto it : llvm::enumerate(localExprs)) {
+for (const auto &it : llvm::enumerate(localExprs)) {
 AffineExpr expr = it.value();
 if (flatExprs[numDims + numSymbols + it.index()] == 0)
 continue;

@@ -121,7 +121,7 @@ bool AffineMap::isMinorIdentityWithBroadcasting(
 if (getNumDims() < getNumResults())
 return false;
 unsigned suffixStart = getNumDims() - getNumResults();
-for (auto idxAndExpr : llvm::enumerate(getResults())) {
+for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
 unsigned resIdx = idxAndExpr.index();
 AffineExpr expr = idxAndExpr.value();
 if (auto constExpr = expr.dyn_cast<AffineConstantExpr>()) {
@@ -168,7 +168,7 @@ bool AffineMap::isPermutationOfMinorIdentityWithBroadcasting(
 getNumResults() > getNumInputs() ? getNumResults() - getNumInputs() : 0;
 llvm::SmallBitVector dimFound(std::max(getNumInputs(), getNumResults()),
 false);
-for (auto idxAndExpr : llvm::enumerate(getResults())) {
+for (const auto &idxAndExpr : llvm::enumerate(getResults())) {
 unsigned resIdx = idxAndExpr.index();
 AffineExpr expr = idxAndExpr.value();
 // Each result may be either a constant 0 (broadcast dimension) or a
@@ -675,7 +675,7 @@ AffineMap mlir::inversePermutation(AffineMap map) {
 return map;
 assert(map.getNumSymbols() == 0 && "expected map without symbols");
 SmallVector<AffineExpr, 4> exprs(map.getNumDims());
-for (auto en : llvm::enumerate(map.getResults())) {
+for (const auto &en : llvm::enumerate(map.getResults())) {
 auto expr = en.value();
 // Skip non-permutations.
 if (auto d = expr.dyn_cast<AffineDimExpr>()) {

@@ -1036,7 +1036,7 @@ AffineMap mlir::makeStridedLinearLayoutMap(ArrayRef<int64_t> strides,
 }
 // AffineExpr for strides.
-for (auto en : llvm::enumerate(strides)) {
+for (const auto &en : llvm::enumerate(strides)) {
 auto dim = en.index();
 auto stride = en.value();
 assert(stride != 0 && "Invalid stride specification");

@@ -316,7 +316,7 @@ OperationVerifier::verifyDominanceOfContainedRegions(Operation &op,
 for (Operation &op : block) {
 if (isReachable) {
 // Check that operands properly dominate this use.
-for (auto operand : llvm::enumerate(op.getOperands())) {
+for (const auto &operand : llvm::enumerate(op.getOperands())) {
 if (domInfo.properlyDominates(operand.value(), &op))
 continue;

@@ -131,7 +131,7 @@ verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
 << succInputsTypes.size();
 }
-for (auto typesIdx :
+for (const auto &typesIdx :
 llvm::enumerate(llvm::zip(*sourceTypes, succInputsTypes))) {
 Type sourceType = std::get<0>(typesIdx.value());
 Type inputType = std::get<1>(typesIdx.value());
@@ -266,7 +266,7 @@ bool mlir::insideMutuallyExclusiveRegions(Operation *a, Operation *b) {
 return false;
 // Compute index of region.
 int64_t beginIndex = -1;
-for (auto it : llvm::enumerate(branchOp->getRegions()))
+for (const auto &it : llvm::enumerate(branchOp->getRegions()))
 if (&it.value() == begin)
 beginIndex = it.index();
 assert(beginIndex != -1 && "could not find region in op");

@@ -189,7 +189,7 @@ LogicalResult mlir::detail::inferReturnTensorTypes(
 if (failed(componentTypeFn(context, location, operands, attributes, regions,
 retComponents)))
 return failure();
-for (auto shapeAndType : retComponents) {
+for (const auto &shapeAndType : retComponents) {
 assert(shapeAndType.getAttribute() == nullptr && "attribute not supported");
 if (shapeAndType.hasRank())
 inferredReturnTypes.push_back(RankedTensorType::get(

@@ -41,7 +41,7 @@ static void applyPatterns(Region &region,
 std::vector<Operation *> opsNotInRange;
 std::vector<Operation *> opsInRange;
 size_t keepIndex = 0;
-for (auto op : enumerate(region.getOps())) {
+for (const auto &op : enumerate(region.getOps())) {
 int index = op.index();
 if (keepIndex < rangeToKeep.size() &&
 index == rangeToKeep[keepIndex].second)

@@ -198,9 +198,9 @@ public:
 maxTypeRangeMemoryIndex(maxTypeRangeMemoryIndex),
 maxValueRangeMemoryIndex(maxValueRangeMemoryIndex),
 maxLoopLevel(maxLoopLevel) {
-for (auto it : llvm::enumerate(constraintFns))
+for (const auto &it : llvm::enumerate(constraintFns))
 constraintToMemIndex.try_emplace(it.value().first(), it.index());
-for (auto it : llvm::enumerate(rewriteFns))
+for (const auto &it : llvm::enumerate(rewriteFns))
 externalRewriterToMemIndex.try_emplace(it.value().first(), it.index());
 }
@@ -631,7 +631,7 @@ void Generator::allocateMemoryIndices(FuncOp matcherFunc,
 ByteCodeLiveRange &defRange = defIt.second;
 // Try to allocate to an existing index.
-for (auto existingIndexIt : llvm::enumerate(allocatedIndices)) {
+for (const auto &existingIndexIt : llvm::enumerate(allocatedIndices)) {
 ByteCodeLiveRange &existingRange = existingIndexIt.value();
 if (!defRange.overlaps(existingRange)) {
 existingRange.unionWith(defRange);

@@ -53,7 +53,7 @@ void PatternApplicator::applyCostModel(CostModel model) {
 // Apply the cost model to the bytecode patterns first, and then the native
 // patterns.
 if (const PDLByteCode *bytecode = frozenPatternList.getPDLByteCode()) {
-for (auto it : llvm::enumerate(bytecode->getPatterns()))
+for (const auto &it : llvm::enumerate(bytecode->getPatterns()))
 mutableByteCodeState->updatePatternBenefit(it.index(), model(it.value()));
 }

@@ -821,7 +821,7 @@ LogicalResult Importer::processFunction(llvm::Function *f) {
 currentEntryBlock = blockList[0];
 // Add function arguments to the entry block.
-for (auto kv : llvm::enumerate(f->args()))
+for (const auto &kv : llvm::enumerate(f->args()))
 instMap[&kv.value()] =
 blockList[0]->addArgument(functionType.getParamType(kv.index()));

@@ -381,7 +381,7 @@ static Value getPHISourceValue(Block *current, Block *pred,
 // the case branch that was taken.
 if (switchOp.getDefaultDestination() == current)
 return switchOp.getDefaultOperands()[index];
-for (auto i : llvm::enumerate(switchOp.getCaseDestinations()))
+for (const auto &i : llvm::enumerate(switchOp.getCaseDestinations()))
 if (i.value() == current)
 return switchOp.getCaseOperands(i.index())[index];
 }

@@ -25,7 +25,7 @@ static void updateFuncOp(FuncOp func,
 // Collect information about the results will become appended arguments.
 SmallVector<Type, 6> erasedResultTypes;
 SmallVector<unsigned, 6> erasedResultIndices;
-for (auto resultType : llvm::enumerate(functionType.getResults())) {
+for (const auto &resultType : llvm::enumerate(functionType.getResults())) {
 if (resultType.value().isa<BaseMemRefType>()) {
 erasedResultIndices.push_back(resultType.index());
 erasedResultTypes.push_back(resultType.value());

@@ -84,7 +84,7 @@ static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) {
 OpBuilder bOuter(forOp);
 // Put together alloc operands for any dynamic dimensions of the memref.
 SmallVector<Value, 4> allocOperands;
-for (auto dim : llvm::enumerate(oldMemRefType.getShape())) {
+for (const auto &dim : llvm::enumerate(oldMemRefType.getShape())) {
 if (dim.value() == ShapedType::kDynamicSize)
 allocOperands.push_back(bOuter.createOrFold<memref::DimOp>(
 forOp.getLoc(), oldMemRef, dim.index()));

@@ -252,7 +252,7 @@ public:
 op->setLoc(loc);
 op->setAttrs(attrs);
 op->setOperands(operands);
-for (auto it : llvm::enumerate(successors))
+for (const auto &it : llvm::enumerate(successors))
 op->setSuccessor(it.value(), it.index());
 }
@@ -1256,7 +1256,7 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
 remapped.reserve(llvm::size(values));
 SmallVector<Type, 1> legalTypes;
-for (auto it : llvm::enumerate(values)) {
+for (const auto &it : llvm::enumerate(values)) {
 Value operand = it.value();
 Type origType = operand.getType();

@@ -215,7 +215,7 @@ inlineRegionImpl(InlinerInterface &interface, Region *src, Block *inlineBlock,
 } else {
 // Otherwise, there were multiple blocks inlined. Add arguments to the post
 // insertion block to represent the results to replace.
-for (auto resultToRepl : llvm::enumerate(resultsToReplace)) {
+for (const auto &resultToRepl : llvm::enumerate(resultsToReplace)) {
 resultToRepl.value().replaceAllUsesWith(postInsertBlock->addArgument(
 regionResultTypes[resultToRepl.index()]));
 }

@@ -3353,7 +3353,7 @@ createFullTiles(MutableArrayRef<AffineForOp> inputNest,
 // Add the body for the full tile loop nest.
 BlockAndValueMapping operandMap;
-for (auto loopEn : llvm::enumerate(inputNest))
+for (const auto &loopEn : llvm::enumerate(inputNest))
 operandMap.map(loopEn.value().getInductionVar(),
 fullTileLoops[loopEn.index()].getInductionVar());
 b = OpBuilder::atBlockTerminator(fullTileLoops.back().getBody());

@@ -589,7 +589,7 @@ LogicalResult BlockMergeCluster::merge(RewriterBase &rewriter) {
 1 + blocksToMerge.size(),
 SmallVector<Value, 8>(operandsToMerge.size()));
 unsigned curOpIndex = 0;
-for (auto it : llvm::enumerate(operandsToMerge)) {
+for (const auto &it : llvm::enumerate(operandsToMerge)) {
 unsigned nextOpOffset = it.value().first - curOpIndex;
 curOpIndex = it.value().first;