[MLIR][NFC] Adopt variadic isa<>
Differential Revision: https://reviews.llvm.org/D82489
parent 47ac45332e
commit d891d738d9
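For background: llvm::isa<> (from llvm/Support/Casting.h) accepts a variadic list of types and returns true if the value is an instance of any of them, so chains of the form isa<A>(x) || isa<B>(x) collapse into a single isa<A, B>(x) call. Below is a minimal self-contained sketch of the pattern this commit applies throughout MLIR, using a hypothetical Shape hierarchy rather than MLIR ops (the class names are illustrative, not from the patch):

    #include "llvm/Support/Casting.h"

    // Hypothetical hierarchy using LLVM-style RTTI via classof().
    struct Shape {
      enum Kind { CircleKind, SquareKind, LineKind };
      Kind kind;
      Shape(Kind k) : kind(k) {}
    };
    struct Circle : Shape {
      Circle() : Shape(CircleKind) {}
      static bool classof(const Shape *s) { return s->kind == CircleKind; }
    };
    struct Square : Shape {
      Square() : Shape(SquareKind) {}
      static bool classof(const Shape *s) { return s->kind == SquareKind; }
    };

    bool isCircleOrSquare(const Shape *s) {
      // Before: one isa<> call per candidate type, chained with ||.
      //   return llvm::isa<Circle>(s) || llvm::isa<Square>(s);
      // After: one variadic call; semantically a logical OR over the list.
      return llvm::isa<Circle, Square>(s);
    }

The rewrite is purely mechanical (NFC): each hunk below replaces an || chain of isa<> calls with the equivalent variadic call.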
@@ -859,8 +859,7 @@ void mlir::getDependenceComponents(
   // Collect all load and store ops in loop nest rooted at 'forOp'.
   SmallVector<Operation *, 8> loadAndStoreOpInsts;
   forOp.getOperation()->walk([&](Operation *opInst) {
-    if (isa<AffineReadOpInterface>(opInst) ||
-        isa<AffineWriteOpInterface>(opInst))
+    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(opInst))
       loadAndStoreOpInsts.push_back(opInst);
   });
 
@@ -291,8 +291,7 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
 
   // No vectorization across unknown regions.
   auto regions = matcher::Op([](Operation &op) -> bool {
-    return op.getNumRegions() != 0 &&
-           !(isa<AffineIfOp>(op) || isa<AffineForOp>(op));
+    return op.getNumRegions() != 0 && !isa<AffineIfOp, AffineForOp>(op);
   });
   SmallVector<NestedMatch, 8> regionsMatched;
   regions.match(forOp, &regionsMatched);
@@ -145,7 +145,7 @@ NestedPattern For(FilterFunctionType filter, ArrayRef<NestedPattern> nested) {
 }
 
 bool isLoadOrStore(Operation &op) {
-  return isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op);
+  return isa<AffineLoadOp, AffineStoreOp>(op);
 }
 
 } // end namespace matcher
@@ -85,8 +85,7 @@ static void getBackwardSliceImpl(Operation *op,
   if (!op)
     return;
 
-  assert((op->getNumRegions() == 0 || isa<AffineForOp>(op) ||
-          isa<scf::ForOp>(op)) &&
+  assert((op->getNumRegions() == 0 || isa<AffineForOp, scf::ForOp>(op)) &&
          "unexpected generic op with regions");
 
   // Evaluate whether we should keep this def.
@@ -196,7 +196,7 @@ LogicalResult MemRefRegion::unionBoundingBox(const MemRefRegion &other) {
 LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
                                     ComputationSliceState *sliceState,
                                     bool addMemRefDimBounds) {
-  assert((isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op)) &&
+  assert((isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) &&
          "affine read/write op expected");
 
   MemRefAccess access(op);
@@ -141,9 +141,8 @@ bool mlir::isValidDim(Value value) {
   // This value has to be a block argument for an op that has the
   // `AffineScope` trait or for an affine.for or affine.parallel.
   auto *parentOp = value.cast<BlockArgument>().getOwner()->getParentOp();
-  return parentOp &&
-         (parentOp->hasTrait<OpTrait::AffineScope>() ||
-          isa<AffineForOp>(parentOp) || isa<AffineParallelOp>(parentOp));
+  return parentOp && (parentOp->hasTrait<OpTrait::AffineScope>() ||
+                      isa<AffineForOp, AffineParallelOp>(parentOp));
 }
 
 // Value can be used as a dimension id iff it meets one of the following
@@ -165,7 +164,7 @@ bool mlir::isValidDim(Value value, Region *region) {
   // This value has to be a block argument for an affine.for or an
   // affine.parallel.
   auto *parentOp = value.cast<BlockArgument>().getOwner()->getParentOp();
-  return isa<AffineForOp>(parentOp) || isa<AffineParallelOp>(parentOp);
+  return isa<AffineForOp, AffineParallelOp>(parentOp);
 }
 
 // Affine apply operation is ok if all of its operands are ok.
@@ -120,8 +120,7 @@ AffineDataCopyGeneration::runOnBlock(Block *block,
   // Get to the first load, store, or for op (that is not a copy nest itself).
   auto curBegin =
       std::find_if(block->begin(), block->end(), [&](Operation &op) {
-        return (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
-                isa<AffineForOp>(op)) &&
+        return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
                copyNests.count(&op) == 0;
       });
 
@@ -171,8 +170,7 @@ AffineDataCopyGeneration::runOnBlock(Block *block,
   }
   // Get to the next load or store op after 'forOp'.
   curBegin = std::find_if(std::next(it), block->end(), [&](Operation &op) {
-    return (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
-            isa<AffineForOp>(op)) &&
+    return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
            copyNests.count(&op) == 0;
   });
   it = curBegin;
@@ -63,10 +63,7 @@ areAllOpsInTheBlockListInvariant(Region &blockList, Value indVar,
 
 static bool isMemRefDereferencingOp(Operation &op) {
   // TODO(asabne): Support DMA Ops.
-  if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op)) {
-    return true;
-  }
-  return false;
+  return isa<AffineLoadOp, AffineStoreOp>(op);
 }
 
 // Returns true if the individual op is loop invariant.
@@ -93,7 +93,7 @@ void SimplifyAffineStructures::runOnFunction() {
 
     // The simplification of the attribute will likely simplify the op. Try to
     // fold / apply canonicalization patterns when we have affine dialect ops.
-    if (isa<AffineForOp>(op) || isa<AffineIfOp>(op) || isa<AffineApplyOp>(op))
+    if (isa<AffineForOp, AffineIfOp, AffineApplyOp>(op))
       applyOpPatternsAndFold(op, patterns);
   });
 
@@ -561,7 +561,7 @@ makePatterns(const DenseSet<Operation *> &parallelLoops, int vectorRank,
 
 static NestedPattern &vectorTransferPattern() {
   static auto pattern = matcher::Op([](Operation &op) {
-    return isa<vector::TransferReadOp>(op) || isa<vector::TransferWriteOp>(op);
+    return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
   });
   return pattern;
 }
@@ -54,7 +54,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
 }
 
 static bool isSinkingBeneficiary(Operation *op) {
-  return isa<ConstantOp>(op) || isa<DimOp>(op);
+  return isa<ConstantOp, DimOp>(op);
 }
 
 LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
@@ -38,7 +38,7 @@ void mlir::linalg::hoistViewAllocOps(FuncOp func) {
   while (changed) {
     changed = false;
    func.walk([&changed](Operation *op) {
-      if (!isa<AllocOp>(op) && !isa<AllocaOp>(op) && !isa<DeallocOp>(op))
+      if (!isa<AllocOp, AllocaOp, DeallocOp>(op))
         return;
 
       LLVM_DEBUG(DBGS() << "Candidate for hoisting: " << *op << "\n");
@@ -64,15 +64,14 @@ void mlir::linalg::hoistViewAllocOps(FuncOp func) {
         v = op->getResult(0);
       }
       if (v && !llvm::all_of(v.getUses(), [&](OpOperand &operand) {
-            return isa<ViewLikeOpInterface>(operand.getOwner()) ||
-                   isa<DeallocOp>(operand.getOwner());
+            return isa<ViewLikeOpInterface, DeallocOp>(operand.getOwner());
           })) {
         LLVM_DEBUG(DBGS() << "Found non view-like or dealloc use: bail\n");
         return;
       }
 
       // Move AllocOp before the loop.
-      if (isa<AllocOp>(op) || isa<AllocaOp>(op))
+      if (isa<AllocOp, AllocaOp>(op))
         loop.moveOutOfLoop({op});
       else // Move DeallocOp outside of the loop.
         op->moveAfter(loop);
@@ -37,7 +37,7 @@ LogicalResult mlir::linalg::interchangeGenericLinalgOpPrecondition(
   if (interchangeVector.empty())
     return failure();
   // Transformation applies to generic ops only.
-  if (!isa<GenericOp>(op) && !isa<IndexedGenericOp>(op))
+  if (!isa<GenericOp, IndexedGenericOp>(op))
     return failure();
   LinalgOp linOp = cast<LinalgOp>(op);
   // Transformation applies to buffers only.
@@ -76,7 +76,7 @@ LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
   for (Type outputTensorType : linalgOp.getOutputTensorTypes())
     if (!outputTensorType.cast<ShapedType>().hasStaticShape())
       return failure();
-  if (isa<linalg::MatmulOp>(op) || isa<linalg::FillOp>(op))
+  if (isa<linalg::MatmulOp, linalg::FillOp>(op))
     return success();
 
   auto genericOp = dyn_cast<linalg::GenericOp>(op);
@@ -831,7 +831,7 @@ static LogicalResult verify(YieldOp op) {
   auto results = parentOp->getResults();
   auto operands = op.getOperands();
 
-  if (isa<IfOp>(parentOp) || isa<ForOp>(parentOp)) {
+  if (isa<IfOp, ForOp>(parentOp)) {
     if (parentOp->getNumResults() != op.getNumOperands())
       return op.emitOpError() << "parent of yield must have same number of "
                                  "results as the yield operands";
@@ -45,8 +45,7 @@ using namespace mlir::spirv;
 static inline bool containsReturn(Region &region) {
   return llvm::any_of(region, [](Block &block) {
     Operation *terminator = block.getTerminator();
-    return isa<spirv::ReturnOp>(terminator) ||
-           isa<spirv::ReturnValueOp>(terminator);
+    return isa<spirv::ReturnOp, spirv::ReturnValueOp>(terminator);
   });
 }
 
@@ -62,8 +61,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
     // Return true here when inlining into spv.func, spv.selection, and
     // spv.loop operations.
     auto *op = dest->getParentOp();
-    return isa<spirv::FuncOp>(op) || isa<spirv::SelectionOp>(op) ||
-           isa<spirv::LoopOp>(op);
+    return isa<spirv::FuncOp, spirv::SelectionOp, spirv::LoopOp>(op);
   }
 
   /// Returns true if the given operation 'op', that is registered to this
@@ -72,7 +70,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
   bool isLegalToInline(Operation *op, Region *dest,
                        BlockAndValueMapping &) const final {
     // TODO(antiagainst): Enable inlining structured control flows with return.
-    if ((isa<spirv::SelectionOp>(op) || isa<spirv::LoopOp>(op)) &&
+    if ((isa<spirv::SelectionOp, spirv::LoopOp>(op)) &&
         containsReturn(op->getRegion(0)))
       return false;
    // TODO(antiagainst): we need to filter OpKill here to avoid inlining it to
@@ -22,8 +22,7 @@ using namespace mlir;
 //===----------------------------------------------------------------------===//
 
 bool MemoryEffects::Effect::classof(const SideEffects::Effect *effect) {
-  return isa<Allocate>(effect) || isa<Free>(effect) || isa<Read>(effect) ||
-         isa<Write>(effect);
+  return isa<Allocate, Free, Read, Write>(effect);
 }
 
 //===----------------------------------------------------------------------===//
@@ -97,7 +97,7 @@ private:
 
 /// Returns true if the given pass is hidden from IR printing.
 static bool isHiddenPass(Pass *pass) {
-  return isa<OpToOpPassAdaptor>(pass) || isa<VerifierPass>(pass);
+  return isa<OpToOpPassAdaptor, VerifierPass>(pass);
 }
 
 static void printIR(Operation *op, bool printModuleScope, raw_ostream &out,
@@ -34,8 +34,7 @@ std::string tblgen::Dialect::getCppClassName() const {
 static StringRef getAsStringOrEmpty(const llvm::Record &record,
                                     StringRef fieldName) {
   if (auto valueInit = record.getValueInit(fieldName)) {
-    if (llvm::isa<llvm::CodeInit>(valueInit) ||
-        llvm::isa<llvm::StringInit>(valueInit))
+    if (llvm::isa<llvm::CodeInit, llvm::StringInit>(valueInit))
       return record.getValueAsString(fieldName);
   }
   return "";
@@ -558,7 +558,7 @@ StringRef tblgen::Operator::getSummary() const {
 
 bool tblgen::Operator::hasAssemblyFormat() const {
   auto *valueInit = def.getValueInit("assemblyFormat");
-  return isa<llvm::CodeInit>(valueInit) || isa<llvm::StringInit>(valueInit);
+  return isa<llvm::CodeInit, llvm::StringInit>(valueInit);
 }
 
 StringRef tblgen::Operator::getAssemblyFormat() const {
@@ -57,7 +57,7 @@ bool tblgen::DagLeaf::isEnumAttrCase() const {
 }
 
 bool tblgen::DagLeaf::isStringAttr() const {
-  return isa<llvm::StringInit>(def) || isa<llvm::CodeInit>(def);
+  return isa<llvm::StringInit, llvm::CodeInit>(def);
 }
 
 tblgen::Constraint tblgen::DagLeaf::getAsConstraint() const {
@@ -106,7 +106,7 @@ private:
 /// Globals are inserted before the first function, if any.
 Block::iterator getGlobalInsertPt() {
   auto i = module.getBody()->begin();
-  while (!isa<LLVMFuncOp>(i) && !isa<ModuleTerminatorOp>(i))
+  while (!isa<LLVMFuncOp, ModuleTerminatorOp>(i))
     ++i;
   return i;
 }
@@ -129,7 +129,7 @@ llvm::Constant *ModuleTranslation::getLLVMConstant(llvm::Type *llvmType,
   // another sequence type. The recursion terminates because each step removes
   // one outer sequential type.
   bool elementTypeSequential =
-      isa<llvm::ArrayType>(elementType) || isa<llvm::VectorType>(elementType);
+      isa<llvm::ArrayType, llvm::VectorType>(elementType);
   llvm::Constant *child = getLLVMConstant(
       elementType,
       elementTypeSequential ? splatAttr : splatAttr.getSplatValue(), loc);
@@ -70,10 +70,8 @@ mlir::createLoopFusionPass(unsigned fastMemorySpace,
 
 // TODO(b/117228571) Replace when this is modeled through side-effects/op traits
 static bool isMemRefDereferencingOp(Operation &op) {
-  if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op) ||
-      isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op))
-    return true;
-  return false;
+  return isa<AffineReadOpInterface, AffineWriteOpInterface, AffineDmaStartOp,
+             AffineDmaWaitOp>(op);
 }
 
 namespace {
@@ -207,7 +207,7 @@ void MemRefDataFlowOpt::runOnFunction() {
       // could still erase it if the call had no side-effects.
       continue;
     if (llvm::any_of(memref.getUsers(), [&](Operation *ownerOp) {
-          return (!isa<AffineStoreOp>(ownerOp) && !isa<DeallocOp>(ownerOp));
+          return !isa<AffineStoreOp, DeallocOp>(ownerOp);
         }))
      continue;
 
@@ -48,7 +48,7 @@ std::unique_ptr<OperationPass<FuncOp>> mlir::createPipelineDataTransferPass() {
 // Temporary utility: will be replaced when DmaStart/DmaFinish abstract op's are
 // added. TODO(b/117228571)
 static unsigned getTagMemRefPos(Operation &dmaOp) {
-  assert(isa<AffineDmaStartOp>(dmaOp) || isa<AffineDmaWaitOp>(dmaOp));
+  assert((isa<AffineDmaStartOp, AffineDmaWaitOp>(dmaOp)));
   if (auto dmaStartOp = dyn_cast<AffineDmaStartOp>(dmaOp)) {
     return dmaStartOp.getTagMemRefOperandIndex();
   }
@@ -105,7 +105,7 @@ static Operation *getLastDependentOpInRange(Operation *opA, Operation *opB) {
        it != Block::reverse_iterator(opA); ++it) {
     Operation *opX = &(*it);
     opX->walk([&](Operation *op) {
-      if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op)) {
+      if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
         if (isDependentLoadOrStoreOp(op, values)) {
           lastDepOp = opX;
           return WalkResult::interrupt();
@@ -179,7 +179,7 @@ gatherLoadsAndStores(AffineForOp forOp,
                      SmallVectorImpl<Operation *> &loadAndStoreOps) {
   bool hasIfOp = false;
   forOp.walk([&](Operation *op) {
-    if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op))
+    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
       loadAndStoreOps.push_back(op);
     else if (isa<AffineIfOp>(op))
       hasIfOp = true;
@@ -30,10 +30,8 @@ using namespace mlir;
 // Temporary utility: will be replaced when this is modeled through
 // side-effects/op traits. TODO(b/117228571)
 static bool isMemRefDereferencingOp(Operation &op) {
-  if (isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op) ||
-      isa<AffineDmaStartOp>(op) || isa<AffineDmaWaitOp>(op))
-    return true;
-  return false;
+  return isa<AffineReadOpInterface, AffineWriteOpInterface, AffineDmaStartOp,
+             AffineDmaWaitOp>(op);
 }
 
 /// Return the AffineMapAttr associated with memory 'op' on 'memref'.
@@ -131,7 +131,7 @@ static std::string replaceAllSubstrs(std::string str, const std::string &match,
 static inline bool hasStringAttribute(const Record &record,
                                       StringRef fieldName) {
   auto valueInit = record.getValueInit(fieldName);
-  return isa<CodeInit>(valueInit) || isa<StringInit>(valueInit);
+  return isa<CodeInit, StringInit>(valueInit);
 }
 
 static std::string getArgumentName(const Operator &op, int index) {