Use llvm::append_range where applicable

llvm::append_range knows the size of the source range, so there is no need to call reserve beforehand. NFCI.
Benjamin Kramer 2022-03-18 20:05:12 +01:00
parent 5dde9c1286
commit 89d8035e36
9 changed files with 20 additions and 54 deletions
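
For readers unfamiliar with the helper, here is a minimal standalone sketch of the pattern this commit applies. It is illustrative only: the element type and function names below are made up and do not come from the changed files. llvm::append_range(C, R) (from llvm/ADT/STLExtras.h) inserts the whole range R at the end of the container C in one call, so the container can size itself from the range and a separate reserve is redundant.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h" // llvm::append_range
#include "llvm/ADT/SmallVector.h"

// Illustrative example, not taken from the commit.
// Before: the hand-written reserve + push_back pattern removed throughout.
static llvm::SmallVector<int, 8> copyManually(llvm::ArrayRef<int> input) {
  llvm::SmallVector<int, 8> out;
  out.reserve(input.size());
  for (int value : input)
    out.push_back(value);
  return out;
}

// After: append_range does the bulk insert in one call; the insert sees the
// range's size, so no explicit reserve is needed.
static llvm::SmallVector<int, 8> copyWithAppendRange(llvm::ArrayRef<int> input) {
  llvm::SmallVector<int, 8> out;
  llvm::append_range(out, input);
  return out;
}

SmallVector's range insert grows the buffer once for the entire range, which is why the explicit reserve in the removed code added nothing.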


@@ -220,11 +220,6 @@ Type LLVMTypeConverter::convertFunctionSignature(
     result.addInputs(en.index(), converted);
   }
-  SmallVector<Type, 8> argTypes;
-  argTypes.reserve(llvm::size(result.getConvertedTypes()));
-  for (Type type : result.getConvertedTypes())
-    argTypes.push_back(type);
   // If function does not return anything, create the void result type,
   // if it returns on element, convert it, otherwise pack the result types into
   // a struct.
@@ -233,7 +228,8 @@ Type LLVMTypeConverter::convertFunctionSignature(
                     : packFunctionResults(funcTy.getResults());
   if (!resultType)
     return {};
-  return LLVM::LLVMFunctionType::get(resultType, argTypes, isVariadic);
+  return LLVM::LLVMFunctionType::get(resultType, result.getConvertedTypes(),
+                                     isVariadic);
 }

 /// Converts the function type to a C-compatible format, in particular using


@@ -1218,22 +1218,14 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
   if (auto loadOp = dyn_cast<AffineReadOpInterface>(loadOrStoreOpInst)) {
     memref = loadOp.getMemRef();
     opInst = loadOrStoreOpInst;
-    auto loadMemrefType = loadOp.getMemRefType();
-    indices.reserve(loadMemrefType.getRank());
-    for (auto index : loadOp.getMapOperands()) {
-      indices.push_back(index);
-    }
+    llvm::append_range(indices, loadOp.getMapOperands());
   } else {
     assert(isa<AffineWriteOpInterface>(loadOrStoreOpInst) &&
            "Affine read/write op expected");
     auto storeOp = cast<AffineWriteOpInterface>(loadOrStoreOpInst);
     opInst = loadOrStoreOpInst;
     memref = storeOp.getMemRef();
-    auto storeMemrefType = storeOp.getMemRefType();
-    indices.reserve(storeMemrefType.getRank());
-    for (auto index : storeOp.getMapOperands()) {
-      indices.push_back(index);
-    }
+    llvm::append_range(indices, storeOp.getMapOperands());
   }
 }


@@ -527,10 +527,8 @@ public:
   void addToNode(unsigned id, const SmallVectorImpl<Operation *> &loads,
                  const SmallVectorImpl<Operation *> &stores) {
     Node *node = getNode(id);
-    for (auto *loadOpInst : loads)
-      node->loads.push_back(loadOpInst);
-    for (auto *storeOpInst : stores)
-      node->stores.push_back(storeOpInst);
+    llvm::append_range(node->loads, loads);
+    llvm::append_range(node->stores, stores);
   }

   void clearNodeLoadAndStores(unsigned id) {


@@ -2660,9 +2660,7 @@ static AffineIfOp createSeparationCondition(MutableArrayRef<AffineForOp> loops,
   FlatAffineValueConstraints cst;
   SmallVector<Operation *, 8> ops;
-  ops.reserve(loops.size());
-  for (AffineForOp forOp : loops)
-    ops.push_back(forOp);
+  llvm::append_range(ops, loops);
   (void)getIndexSet(ops, &cst);

   // Remove constraints that are independent of these loop IVs.


@@ -256,15 +256,11 @@ struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
     SmallVector<Value> workList;

     func->walk([&](cf::CondBranchOp condBr) {
-      for (auto operand : condBr.getOperands()) {
-        workList.push_back(operand);
-      }
+      llvm::append_range(workList, condBr.getOperands());
     });

     func->walk([&](cf::BranchOp br) {
-      for (auto operand : br.getOperands()) {
-        workList.push_back(operand);
-      }
+      llvm::append_range(workList, br.getOperands());
     });

     DenseSet<Value> visitedValues;
@@ -310,8 +306,7 @@ struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
         // detensorable and if so, their operands will be added to workList to
        // potentially discover other parts of the detensorable component.
        for (auto *user : currentItem.getUsers())
-          for (Value result : user->getResults())
-            workList.push_back(result);
+          llvm::append_range(workList, user->getResults());

        // 2 - Look backward:
        // 2.1 - The current item is defined by a block argument. If the owner
@ -383,10 +378,7 @@ struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
}
opsToDetensor.insert(genericOp);
for (Value genericOpOperand : genericOp.inputs())
workList.push_back(genericOpOperand);
llvm::append_range(workList, genericOp.inputs());
continue;
}
@@ -405,8 +397,7 @@ struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
        if (llvm::all_of(
                currentItemDefiningOp->getResultTypes(),
                [&](Type resultType) { return resultType.isIntOrFloat(); }))
-          for (Value scalarOpOperand : currentItemDefiningOp->getOperands())
-            workList.push_back(scalarOpOperand);
+          llvm::append_range(workList, currentItemDefiningOp->getOperands());
      }

      // Since the cost model gives up on some ops (see the details of step 2.2


@@ -177,7 +177,7 @@ static Value genAllocaScalar(ConversionPatternRewriter &rewriter, Location loc,
 /// Generates a temporary buffer of the given type and given contents.
 static Value genBuffer(ConversionPatternRewriter &rewriter, Location loc,
-                       ArrayRef<Value> values) {
+                       ValueRange values) {
   unsigned sz = values.size();
   assert(sz >= 1);
   Value buffer = genAlloca(rewriter, loc, sz, values[0].getType());
@@ -205,10 +205,7 @@ static void newParams(ConversionPatternRewriter &rewriter,
   params.push_back(genBuffer(rewriter, loc, attrs));
   // Dimension sizes array of the enveloping tensor. Useful for either
   // verification of external data, or for construction of internal data.
-  SmallVector<Value, 4> sizes;
-  for (Value s : szs)
-    sizes.push_back(s);
-  params.push_back(genBuffer(rewriter, loc, sizes));
+  params.push_back(genBuffer(rewriter, loc, szs));
   // Dimension order permutation array. This is the "identity" permutation by
   // default, or otherwise the "reverse" permutation of a given ordering, so
   // that indices can be mapped quickly to the right position.


@@ -116,8 +116,7 @@ Optional<SmallVector<ReassociationIndices>> mlir::composeReassociationIndices(
   for (ReassociationIndicesRef consumerIndices : consumerReassociations) {
     ReassociationIndices reassociations;
     for (int64_t consumerIndex : consumerIndices) {
-      for (int64_t producerIndex : producerReassociations[consumerIndex])
-        reassociations.push_back(producerIndex);
+      llvm::append_range(reassociations, producerReassociations[consumerIndex]);
     }
     composedIndices.push_back(std::move(reassociations));
   }


@@ -727,19 +727,16 @@ AffineMap mlir::getProjectedMap(AffineMap map,
 //===----------------------------------------------------------------------===//

 MutableAffineMap::MutableAffineMap(AffineMap map)
-    : numDims(map.getNumDims()), numSymbols(map.getNumSymbols()),
-      context(map.getContext()) {
-  for (auto result : map.getResults())
-    results.push_back(result);
-}
+    : results(map.getResults().begin(), map.getResults().end()),
+      numDims(map.getNumDims()), numSymbols(map.getNumSymbols()),
+      context(map.getContext()) {}

 void MutableAffineMap::reset(AffineMap map) {
   results.clear();
   numDims = map.getNumDims();
   numSymbols = map.getNumSymbols();
   context = map.getContext();
-  for (auto result : map.getResults())
-    results.push_back(result);
+  llvm::append_range(results, map.getResults());
 }

 bool MutableAffineMap::isMultipleOf(unsigned idx, int64_t factor) const {


@@ -303,9 +303,7 @@ convertOperationImpl(Operation &opInst, llvm::IRBuilderBase &builder,
     // TODO: refactor function type creation which usually occurs in std-LLVM
     // conversion.
     SmallVector<Type, 8> operandTypes;
-    operandTypes.reserve(inlineAsmOp.getOperands().size());
-    for (auto t : inlineAsmOp.getOperands().getTypes())
-      operandTypes.push_back(t);
+    llvm::append_range(operandTypes, inlineAsmOp.getOperands().getTypes());
     Type resultType;
     if (inlineAsmOp.getNumResults() == 0) {