[mlir] NFC: Fix trivial typo
Differential Revision: https://reviews.llvm.org/D77473
parent abdd042bb7
commit 5aacce3db2
@@ -96,7 +96,7 @@ Example:
 llvm.func @func() attributes {
   passthrough = ["noinline",           // value-less attribute
                  ["alignstack", "4"],  // integer attribute with value
-                 ["other", "attr"]]    // attrbute unknown to LLVM
+                 ["other", "attr"]]    // attribute unknown to LLVM
 } {
   llvm.return
 }
@@ -30,7 +30,7 @@ indices, and optionally two additional arguments corresponding to the stride (in
 terms of number of elements) and the number of elements to transfer per stride.
 The tag location is used by a dma_wait operation to check for completion. The
 indices of the source memref, destination memref, and the tag memref have the
-same restrictions as any load/store operation in a affine context (whenever DMA
+same restrictions as any load/store operation in an affine context (whenever DMA
 operations appear in an affine context). See
 [restrictions on dimensions and symbols](Affine.md#restrictions-on-dimensions-and-symbols)
 in affine contexts. This allows powerful static analysis and transformations in
@@ -1275,7 +1275,7 @@ Syntax:
 affine-map-attribute ::= `affine_map` `<` affine-map `>`
 ```
 
-An affine-map attribute is an attribute that represents a affine-map object.
+An affine-map attribute is an attribute that represents an affine-map object.
 
 #### Array Attribute
 
@@ -532,7 +532,7 @@ with [Tensor Comprehensions](#lessonstc).
 Of course, we are not advocating for using ML everywhere in the stack
 immediately: low-level compilation and machine models are still quite performant
 in LLVM. However, for the high-level and mid-level optimization problems,
-models need to be conditioned (probalistically) on the low-level
+models need to be conditioned (probabilistically) on the low-level
 compiler which acts as a blackbox. For these reasons we prioritize the
 design of IR and transformations with search-friendly properties over
 building cost models.
@@ -1390,7 +1390,7 @@ EXT_LINKS_IN_WINDOW = NO
 
 FORMULA_FONTSIZE = 10
 
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
 # generated for formulas are transparent PNGs. Transparent PNGs are not
 # supported properly for IE 6.0, but are supported on all modern browsers.
 #
@@ -192,7 +192,7 @@ static mlir::LogicalResult verifyConstantForType(mlir::Type type,
                                                  mlir::Attribute opaqueValue,
                                                  mlir::Operation *op) {
   if (type.isa<mlir::TensorType>()) {
-    // Check that the value is a elements attribute.
+    // Check that the value is an elements attribute.
     auto attrValue = opaqueValue.dyn_cast<mlir::DenseFPElementsAttr>();
     if (!attrValue)
       return op->emitError("constant of TensorType must be initialized by "
@@ -30,12 +30,12 @@ using TransitiveFilter = std::function<bool(Operation *)>;
 /// the transitive uses of op), **without** including that operation.
 ///
 /// This additionally takes a TransitiveFilter which acts as a frontier:
-/// when looking at uses transitively, a operation that does not pass the
+/// when looking at uses transitively, an operation that does not pass the
 /// filter is never propagated through. This allows in particular to carve out
 /// the scope within a ForInst or the scope within an IfInst.
 ///
 /// The implementation traverses the use chains in postorder traversal for
-/// efficiency reasons: if a operation is already in `forwardSlice`, no
+/// efficiency reasons: if an operation is already in `forwardSlice`, no
 /// need to traverse its uses again. Since use-def chains form a DAG, this
 /// terminates.
 ///
@@ -76,12 +76,12 @@ void getForwardSlice(
 /// all the transitive defs of op), **without** including that operation.
 ///
 /// This additionally takes a TransitiveFilter which acts as a frontier:
-/// when looking at defs transitively, a operation that does not pass the
+/// when looking at defs transitively, an operation that does not pass the
 /// filter is never propagated through. This allows in particular to carve out
 /// the scope within a ForInst or the scope within an IfInst.
 ///
 /// The implementation traverses the def chains in postorder traversal for
-/// efficiency reasons: if a operation is already in `backwardSlice`, no
+/// efficiency reasons: if an operation is already in `backwardSlice`, no
 /// need to traverse its definitions again. Since useuse-def chains form a DAG,
 /// this terminates.
 ///
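Aside (not part of this commit): the two slicing helpers documented above are normally driven with a filter lambda that marks the frontier. Below is a minimal, hypothetical sketch; it assumes this revision's `getForwardSlice(Operation *, llvm::SetVector<Operation *> *, TransitiveFilter)` signature and the `mlir/Analysis/SliceAnalysis.h` header path.

```cpp
#include "llvm/ADT/SetVector.h"
#include "mlir/Analysis/SliceAnalysis.h" // assumed header path

using namespace mlir;

// Collect the transitive users of `op`, treating the region that contains
// `op` as the frontier: users in other regions fail the filter and are never
// propagated through, matching the behaviour described in the comments above.
static void collectLocalForwardSlice(Operation *op,
                                     llvm::SetVector<Operation *> &slice) {
  getForwardSlice(op, &slice, [op](Operation *user) {
    return user->getParentRegion() == op->getParentRegion();
  });
}
```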
@@ -127,7 +127,7 @@ public:
                     Location loc) override;
 
   /// Gets the LLVM representation of the index type. The returned type is an
-  /// integer type with the size confgured for this type converter.
+  /// integer type with the size configured for this type converter.
   LLVM::LLVMType getIndexType();
 
   /// Gets the bitwidth of the index type when converted to LLVM.
@@ -417,7 +417,7 @@ public:
 
 namespace LLVM {
 namespace detail {
-/// Replaces the given operaiton "op" with a new operation of type "targetOp"
+/// Replaces the given operation "op" with a new operation of type "targetOp"
 /// and given operands.
 LogicalResult oneToOneRewrite(Operation *op, StringRef targetOp,
                               ValueRange operands,
@@ -435,7 +435,7 @@ LogicalResult vectorOneToOneRewrite(Operation *op, StringRef targetOp,
 /// "TargetOp" where the latter belongs to the LLVM dialect or an equivalent.
 /// Upholds a convention that multi-result operations get converted into an
 /// operation returning the LLVM IR structure type, in which case individual
-/// values must be extacted from using LLVM::ExtractValueOp before being used.
+/// values must be extracted from using LLVM::ExtractValueOp before being used.
 template <typename SourceOp, typename TargetOp>
 class OneToOneConvertToLLVMPattern : public ConvertOpToLLVMPattern<SourceOp> {
 public:
@@ -54,7 +54,7 @@ void populateStdToLLVMBarePtrConversionPatterns(
     bool useAlloca = false);
 
 /// Value to pass as bitwidth for the index type when the converter is expected
-/// to derive the bitwith from the LLVM data layout.
+/// to derive the bitwidth from the LLVM data layout.
 static constexpr unsigned kDeriveIndexBitwidthFromDataLayout = 0;
 
 /// Creates a pass to convert the Standard dialect into the LLVMIR dialect.
@@ -256,7 +256,7 @@ def MatmulOp : LinalgStructured_Op<"matmul", [NInputs<2>, NOutputs<1>]> {
 /// OptionalAttr<I64ArrayAttr>:$strides
 /// OptionalAttr<I64ArrayAttr>:$dilations
 /// OptionalAttr<I64ElementsAttr>:$padding
-/// `stirdes` denotes the step of each window along the dimension.
+/// `strides` denotes the step of each window along the dimension.
 class PoolingBase_Op<string mnemonic, list<OpTrait> props>
   : LinalgStructured_Op<mnemonic, props> {
   let description = [{
@@ -349,7 +349,7 @@ def AssumeAlignmentOp : Std_Op<"assume_alignment"> {
   let summary =
       "assertion that gives alignment information to the input memref";
   let description = [{
-    The `assume_alignment` operation takes a memref and a integer of alignment
+    The `assume_alignment` operation takes a memref and an integer of alignment
     value, and internally annotates the buffer with the given alignment. If
     the buffer isn't aligned to the given alignment, the behavior is undefined.
 
@@ -601,13 +601,13 @@ public:
   int64_t getNumElements() const;
 
   /// Generates a new ElementsAttr by mapping each int value to a new
-  /// underlying APInt. The new values can represent either a integer or float.
+  /// underlying APInt. The new values can represent either an integer or float.
   /// This ElementsAttr should contain integers.
   ElementsAttr mapValues(Type newElementType,
                          function_ref<APInt(const APInt &)> mapping) const;
 
   /// Generates a new ElementsAttr by mapping each float value to a new
-  /// underlying APInt. The new values can represent either a integer or float.
+  /// underlying APInt. The new values can represent either an integer or float.
   /// This ElementsAttr should contain floats.
   ElementsAttr mapValues(Type newElementType,
                          function_ref<APInt(const APFloat &)> mapping) const;
@@ -950,13 +950,13 @@ public:
   DenseElementsAttr reshape(ShapedType newType);
 
   /// Generates a new DenseElementsAttr by mapping each int value to a new
-  /// underlying APInt. The new values can represent either a integer or float.
+  /// underlying APInt. The new values can represent either an integer or float.
   /// This underlying type must be an DenseIntElementsAttr.
   DenseElementsAttr mapValues(Type newElementType,
                               function_ref<APInt(const APInt &)> mapping) const;
 
   /// Generates a new DenseElementsAttr by mapping each float value to a new
-  /// underlying APInt. the new values can represent either a integer or float.
+  /// underlying APInt. the new values can represent either an integer or float.
   /// This underlying type must be an DenseFPElementsAttr.
   DenseElementsAttr
   mapValues(Type newElementType,
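Aside (not part of this commit): the integer `mapValues` overload shown in these two hunks rewrites an attribute element by element through an `APInt` callback. A hypothetical sketch, relying only on the `DenseElementsAttr::mapValues` signature visible above plus `Builder::getIntegerType` and `llvm::APInt::sext`; the header paths are assumptions.

```cpp
#include "llvm/ADT/APInt.h"
#include "mlir/IR/Attributes.h" // assumed header path for DenseElementsAttr
#include "mlir/IR/Builders.h"

// Sign-extend every integer element of `attr` to i64: the callback returns
// the new APInt for each element, and mapValues rebuilds the attribute with
// the requested element type.
static mlir::DenseElementsAttr widenToI64(mlir::DenseElementsAttr attr,
                                          mlir::Builder &b) {
  return attr.mapValues(b.getIntegerType(64), [](const llvm::APInt &value) {
    return value.sext(64);
  });
}
```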
@@ -128,7 +128,7 @@ public:
   /// Return the signedness semantics of this integer type.
   SignednessSemantics getSignedness() const;
 
-  /// Return true if this is a singless integer type.
+  /// Return true if this is a signless integer type.
   bool isSignless() const { return getSignedness() == Signless; }
   /// Return true if this is a signed integer type.
   bool isSigned() const { return getSignedness() == Signed; }
@@ -246,7 +246,7 @@ public:
     return impl->analyses.getCachedAnalysis<AnalysisT>();
   }
 
-  /// Query for a analysis of a child operation, constructing it if necessary.
+  /// Query for an analysis of a child operation, constructing it if necessary.
   template <typename AnalysisT> AnalysisT &getChildAnalysis(Operation *op) {
     return slice(op).template getAnalysis<AnalysisT>();
   }
@@ -308,7 +308,7 @@ public:
   const_iterator find(StringRef key) const;
 
   // Returns the number of static values of the given `symbol` corresponds to.
-  // A static value is a operand/result declared in ODS. Normally a symbol only
+  // A static value is an operand/result declared in ODS. Normally a symbol only
   // represents one static value, but symbols bound to op results can represent
   // more than one if the op is a multi-result op.
   int getStaticValueCount(StringRef symbol) const;
@@ -56,7 +56,7 @@ public:
 };
 
 /// Pattern lowering GPU block/thread size/id to loading SPIR-V invocation
-/// builin variables.
+/// builtin variables.
 template <typename SourceOp, spirv::BuiltIn builtin>
 class LaunchConfigConversion : public SPIRVOpLowering<SourceOp> {
 public:
@@ -742,7 +742,7 @@ static LogicalResult processParallelLoop(
 /// the hardware id might iterate over additional indices. The transformation
 /// caters for this by predicating the created sequence of instructions on
 /// the actual loop bound. This only works if an static upper bound for the
-/// dynamic loop bound can be defived, currently via analyzing `affine.min`
+/// dynamic loop bound can be derived, currently via analyzing `affine.min`
 /// operations.
 LogicalResult
 ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
@@ -1090,7 +1090,7 @@ void nDVectorIterate(const NDVectorTypeInfo &info, OpBuilder &builder,
 }
 ////////////// End Support for Lowering operations on n-D vectors //////////////
 
-/// Replaces the given operaiton "op" with a new operation of type "targetOp"
+/// Replaces the given operation "op" with a new operation of type "targetOp"
 /// and given operands.
 LogicalResult LLVM::detail::oneToOneRewrite(
     Operation *op, StringRef targetOp, ValueRange operands,
@@ -1698,7 +1698,7 @@ struct MemRefCastOpLowering : public ConvertOpToLLVMPattern<MemRefCastOp> {
       auto loadOp = rewriter.create<LLVM::LoadOp>(loc, castPtr);
       rewriter.replaceOp(op, loadOp.getResult());
     } else {
-      llvm_unreachable("Unsuppored unranked memref to unranked memref cast");
+      llvm_unreachable("Unsupported unranked memref to unranked memref cast");
     }
   }
 };
@@ -2285,7 +2285,7 @@ struct SubViewOpLowering : public ConvertOpToLLVMPattern<SubViewOp> {
   }
 };
 
-/// Conversion pattern that transforms a op into:
+/// Conversion pattern that transforms an op into:
 /// 1. An `llvm.mlir.undef` operation to create a memref descriptor
 /// 2. Updates to the descriptor to introduce the data ptr, offset, size
 /// and stride.
@@ -37,8 +37,8 @@ static bool isBoolScalarOrVector(Type type) {
   return false;
 }
 
-/// Converts the given `srcAttr` into a boolean attribute if it holds a integral
-/// value. Returns null attribute if conversion fails.
+/// Converts the given `srcAttr` into a boolean attribute if it holds an
+/// integral value. Returns null attribute if conversion fails.
 static BoolAttr convertBoolAttr(Attribute srcAttr, Builder builder) {
   if (auto boolAttr = srcAttr.dyn_cast<BoolAttr>())
     return boolAttr;
@@ -90,7 +90,7 @@ LogicalResult GPUDialect::verifyOperationAttribute(Operation *op,
 
     // TODO(ntv,zinenko,herhut): if the kernel function has been converted to
     // the LLVM dialect but the caller hasn't (which happens during the
-    // separate compilation), do not check type correspondance as it would
+    // separate compilation), do not check type correspondence as it would
    // require the verifier to be aware of the LLVM type conversion.
     if (kernelLLVMFunction)
       return success();
@@ -241,7 +241,7 @@ private:
   };
   }
 
-  /// Returns an accumulator for comparaison such as min, max. T is the type
+  /// Returns an accumulator for comparison such as min, max. T is the type
   /// of the compare op.
   template <typename T, typename PredicateEnum, PredicateEnum predicate>
   AccumulatorFactory getCmpFactory() const {
@@ -162,8 +162,8 @@ static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
   // cleaner.
   launchOpBody.cloneInto(&outlinedFuncBody, map);
 
-  // Branch from enty of the gpu.func operation to the block that is cloned from
-  // the entry block of the gpu.launch operation.
+  // Branch from entry of the gpu.func operation to the block that is cloned
+  // from the entry block of the gpu.launch operation.
   Block &launchOpEntry = launchOpBody.front();
   Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
   builder.setInsertionPointToEnd(&entryBlock);
@@ -1,4 +1,4 @@
-//===- ParallelLoopSpecialization.cpp - loop.parallel specializeation -----===//
+//===- ParallelLoopSpecialization.cpp - loop.parallel specialization ------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -2533,7 +2533,7 @@ static LogicalResult verify(spirv::UnreachableOp unreachableOp) {
   if (block->hasNoPredecessors())
     return success();
 
-  // TODO(antiagainst): further verification needs to analyze reachablility from
+  // TODO(antiagainst): further verification needs to analyze reachability from
   // the entry block.
 
   return success();
@@ -1013,7 +1013,7 @@ static LogicalResult verify(ReshapeOp op) {
     return op.emitError("invalid output shape for vector type ")
            << outputVectorType;
 
-  // Verify that the 'fixedVectorSizes' match a input/output vector shape
+  // Verify that the 'fixedVectorSizes' match an input/output vector shape
   // suffix.
   unsigned inputVectorRank = inputVectorType.getRank();
   for (unsigned i = 0; i < numFixedVectorSizes; ++i) {
@@ -196,7 +196,7 @@ struct FloatAttributeStorage final
   size_t numObjects;
 };
 
-/// An attribute representing a integral value.
+/// An attribute representing an integral value.
 struct IntegerAttributeStorage final
     : public AttributeStorage,
       public llvm::TrailingObjects<IntegerAttributeStorage, uint64_t> {
@@ -407,24 +407,24 @@ Block *llvm::ilist_traits<::mlir::Operation>::getContainingBlock() {
   return reinterpret_cast<Block *>(reinterpret_cast<char *>(Anchor) - Offset);
 }
 
-/// This is a trait method invoked when a operation is added to a block. We
+/// This is a trait method invoked when an operation is added to a block. We
 /// keep the block pointer up to date.
 void llvm::ilist_traits<::mlir::Operation>::addNodeToList(Operation *op) {
-  assert(!op->getBlock() && "already in a operation block!");
+  assert(!op->getBlock() && "already in an operation block!");
   op->block = getContainingBlock();
 
   // Invalidate the order on the operation.
   op->orderIndex = Operation::kInvalidOrderIdx;
 }
 
-/// This is a trait method invoked when a operation is removed from a block.
+/// This is a trait method invoked when an operation is removed from a block.
 /// We keep the block pointer up to date.
 void llvm::ilist_traits<::mlir::Operation>::removeNodeFromList(Operation *op) {
-  assert(op->block && "not already in a operation block!");
+  assert(op->block && "not already in an operation block!");
   op->block = nullptr;
 }
 
-/// This is a trait method invoked when a operation is moved from one block
+/// This is a trait method invoked when an operation is moved from one block
 /// to another. We keep the block pointer up to date.
 void llvm::ilist_traits<::mlir::Operation>::transferNodesFromList(
     ilist_traits<Operation> &otherList, op_iterator first, op_iterator last) {
@@ -441,7 +441,7 @@ static Optional<WalkResult> walkSymbolUses(
   return WalkResult::advance();
 }
 /// Walk all of the uses, for any symbol, that are nested within the given
-/// operaion 'from', invoking the provided callback for each. This does not
+/// operation 'from', invoking the provided callback for each. This does not
 /// traverse into any nested symbol tables.
 static Optional<WalkResult> walkSymbolUses(
     Operation *from,
@@ -243,7 +243,7 @@ struct PassTiming : public PassInstrumentation {
 
 void PassTiming::runBeforePipeline(const OperationName &name,
                                    const PipelineParentInfo &parentInfo) {
-  // We don't actually want to time the piplelines, they gather their total
+  // We don't actually want to time the pipelines, they gather their total
   // from their held passes.
   getTimer(name.getAsOpaquePointer(), TimerKind::Pipeline,
            [&] { return ("'" + name.getStringRef() + "' Pipeline").str(); });
@@ -2123,7 +2123,7 @@ void mlir::gatherLoops(FuncOp func,
 // TODO: if necessary, this can be extended to also compose in any
 // affine.applys, fold to constant if all result dimensions of the map are
 // constant (canonicalizeMapAndOperands below already does this for single
-// result bound maps), and use simplifyMap to perform algebraic simplication.
+// result bound maps), and use simplifyMap to perform algebraic simplification.
 AffineForOp mlir::createCanonicalizedAffineForOp(
     OpBuilder b, Location loc, ValueRange lbOperands, AffineMap lbMap,
     ValueRange ubOperands, AffineMap ubMap, int64_t step) {
@@ -347,7 +347,7 @@ func @loop_min_max(%N : index) {
 #map_7_values = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3, d4, d5, d6)>
 
 // Check that the "min" (cmpi "slt" + select) reduction sequence is emitted
-// correctly for a an affine map with 7 results.
+// correctly for an affine map with 7 results.
 
 // CHECK-LABEL: func @min_reduction_tree
 // CHECK-NEXT: %[[c0:.*]] = constant 0 : index
@@ -78,7 +78,7 @@ func @llvm_type(!spv.array<4x!llvm.i32>) -> ()
 // -----
 
 // expected-error @+1 {{ArrayStride must be greater than zero}}
-func @array_type_zero_stide(!spv.array<4xi32 [0]>) -> ()
+func @array_type_zero_stride(!spv.array<4xi32 [0]>) -> ()
 
 // -----
 
@@ -499,7 +499,7 @@ static llvm::cl::opt<TestLegalizePatternDriver::ConversionMode>
 
 //===----------------------------------------------------------------------===//
 // ConversionPatternRewriter::getRemappedValue testing. This method is used
-// to get the remapped value of a original value that was replaced using
+// to get the remapped value of an original value that was replaced using
 // ConversionPatternRewriter.
 namespace {
 /// Converter that replaces a one-result one-operand OneVResOneVOperandOp1 with
@@ -243,7 +243,7 @@ def OptionalInvalidK : TestFormat_Op<"optional_invalid_k", [{
 // Variables
 //===----------------------------------------------------------------------===//
 
-// CHECK: error: expected variable to refer to a argument, result, or successor
+// CHECK: error: expected variable to refer to an argument, result, or successor
 def VariableInvalidA : TestFormat_Op<"variable_invalid_a", [{
   $unknown_arg attr-dict
 }]>;
@@ -931,7 +931,7 @@ static void genElementPrinter(Element *element, OpMethodBody &body,
   if (auto *attr = dyn_cast<AttributeVariable>(element)) {
     const NamedAttribute *var = attr->getVar();
 
-    // If we are formatting as a enum, symbolize the attribute as a string.
+    // If we are formatting as an enum, symbolize the attribute as a string.
     if (canFormatEnumAttr(var)) {
       const EnumAttr &enumAttr = cast<EnumAttr>(var->attr);
       body << " p << \"\\\"\" << " << enumAttr.getSymbolToStringFnName() << "("
@@ -1656,7 +1656,7 @@ LogicalResult FormatParser::parseVariable(std::unique_ptr<Element> &element,
     return success();
   }
   return emitError(
-      loc, "expected variable to refer to a argument, result, or successor");
+      loc, "expected variable to refer to an argument, result, or successor");
 }
 
 LogicalResult FormatParser::parseDirective(std::unique_ptr<Element> &element,
@@ -127,7 +127,7 @@ private:
   void createSeparateLocalVarsForOpArgs(DagNode node,
                                         ChildNodeIndexNameMap &childNodeNames);
 
-  // Emits the concrete arguments used to call a op's builder.
+  // Emits the concrete arguments used to call an op's builder.
   void supplyValuesForOpArgs(DagNode node,
                              const ChildNodeIndexNameMap &childNodeNames);
 
@@ -363,7 +363,7 @@ LogicalResult VulkanRuntime::createMemoryBuffers() {
         resourceStorageClassData.find(descriptorSetIndex);
     if (resourceStorageClassMapIt == resourceStorageClassData.end()) {
       llvm::errs()
-          << "cannot find storge class for resource in descriptor set: "
+          << "cannot find storage class for resource in descriptor set: "
           << descriptorSetIndex;
       return failure();
     }
@@ -91,7 +91,7 @@ inline void emitVulkanError(const llvm::Twine &message, VkResult error) {
 /// SPIR-V shader, number of work groups and entry point. After the creation of
 /// VulkanRuntime, special methods must be called in the following
 /// sequence: initRuntime(), run(), updateHostMemoryBuffers(), destroy();
-/// each method in the sequence returns succes or failure depends on the Vulkan
+/// each method in the sequence returns success or failure depends on the Vulkan
 /// result code.
 class VulkanRuntime {
 public:
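Aside (not part of this commit): the comment above spells out the required call order. A hypothetical driver sketch follows; the method names come from that comment, the `LogicalResult` return type is inferred from the neighbouring `createMemoryBuffers` hunk, and both the `VulkanRuntime` header path and the configuration of the shader, entry point, and work-group count are assumptions left out here.

```cpp
#include "mlir/Support/LogicalResult.h"
#include "VulkanRuntime.h" // header name assumed; not shown in this diff

// Drive one execution following the documented sequence, stopping at the
// first step that reports failure.
static mlir::LogicalResult runOnce(VulkanRuntime &runtime) {
  if (mlir::failed(runtime.initRuntime()) || mlir::failed(runtime.run()) ||
      mlir::failed(runtime.updateHostMemoryBuffers()) ||
      mlir::failed(runtime.destroy()))
    return mlir::failure();
  return mlir::success();
}
```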
@@ -528,7 +528,7 @@ def snake_casify(name):
 
 
 def map_spec_operand_to_ods_argument(operand):
-  """Maps a operand in SPIR-V JSON spec to an op argument in ODS.
+  """Maps an operand in SPIR-V JSON spec to an op argument in ODS.
 
   Arguments:
     - A dict containing the operand's kind, quantifier, and name
@@ -842,7 +842,7 @@ def update_td_op_definitions(path, instructions, docs, filter_list,
   with open(path, 'r') as f:
     content = f.read()
 
-  # Split the file into chuncks, each containing one op.
+  # Split the file into chunks, each containing one op.
   ops = content.split(AUTOGEN_OP_DEF_SEPARATOR)
   header = ops[0]
   footer = ops[-1]