[mlir][sparse] Better error handling when bufferizing sparse_tensor ops

sparse_tensor ops cannot be bufferized with One-Shot Bufferize. (They can only be analyzed.) The sparse compiler does the actual lowering to memref. Produce a proper error message instead of crashing.

This fixes #61311.

Differential Revision: https://reviews.llvm.org/D158728
Author: Matthias Springer, 2023-08-25 08:31:58 +02:00
commit 79ff70fda2 (parent 45d2033828)
2 changed files with 42 additions and 23 deletions


@@ -26,9 +26,19 @@ namespace mlir {
 namespace sparse_tensor {
 namespace {
 
+template <typename ConcreteModel, typename ConcreteOp>
+struct SparseBufferizableOpInterfaceExternalModel
+    : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
+  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
+                          const BufferizationOptions &options) const {
+    return op->emitError(
+        "sparse_tensor ops must be bufferized with the sparse compiler");
+  }
+};
+
 struct ConcatenateOpInterface
-    : public BufferizableOpInterface::ExternalModel<
-          ConcatenateOpInterface, sparse_tensor::ConcatenateOp> {
+    : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
+                                                 sparse_tensor::ConcatenateOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
 
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
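The base class introduced above is a conventional CRTP external-model mixin: each per-op interface below keeps its own analysis hooks (bufferizesToMemoryRead, bufferizesToAllocation, and so on) and only inherits the error-emitting bufferize. A minimal standalone C++ sketch of that pattern, with hypothetical names (FailingBufferizeBase, ConvertModel) used purely for illustration:

#include <iostream>

// Shared CRTP base: injects one failing bufferize() into every derived model.
template <typename ConcreteModel>
struct FailingBufferizeBase {
  bool bufferize() const {
    std::cerr << "sparse_tensor ops must be bufferized with the sparse compiler\n";
    return false; // mirrors a failed LogicalResult
  }
};

// Per-op model: supplies its own analysis hook, inherits the shared failure.
struct ConvertModel : FailingBufferizeBase<ConvertModel> {
  bool bufferizesToAllocation() const { return true; }
};

int main() {
  ConvertModel m;
  m.bufferizesToAllocation();   // analysis hooks still answer per op
  return m.bufferize() ? 0 : 1; // bufferization uniformly fails with the error
}

This keeps the analysis-time contract of each op intact while guaranteeing that any attempt to actually bufferize a sparse_tensor op produces a diagnostic instead of reaching unimplemented code.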
@@ -52,9 +62,8 @@ struct ConcatenateOpInterface
   }
 };
 
-struct ConvertOpInterface
-    : public BufferizableOpInterface::ExternalModel<ConvertOpInterface,
-                                                    sparse_tensor::ConvertOp> {
+struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                                ConvertOpInterface, sparse_tensor::ConvertOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // ConvertOps may allocate. (Unless they convert between two identical
     // types, then they fold away.)
@@ -83,8 +92,8 @@ struct ConvertOpInterface
 };
 
 struct LoadOpInterface
-    : public BufferizableOpInterface::ExternalModel<LoadOpInterface,
-                                                    sparse_tensor::LoadOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
+                                                        sparse_tensor::LoadOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return false;
@@ -102,8 +111,8 @@ struct LoadOpInterface
 };
 
 struct NewOpInterface
-    : public BufferizableOpInterface::ExternalModel<NewOpInterface,
-                                                    sparse_tensor::NewOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
+                                                        sparse_tensor::NewOp> {
   bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
                                      const AnalysisState &state) const {
     // NewOps allocate but do not write.
@@ -114,8 +123,8 @@ struct NewOpInterface
 };
 
 struct PackOpInterface
-    : public BufferizableOpInterface::ExternalModel<PackOpInterface,
-                                                    sparse_tensor::PackOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<PackOpInterface,
+                                                        sparse_tensor::PackOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // PackOp reuses all the buffers instead of allocating new ones
     return false;
@@ -145,9 +154,8 @@ struct PackOpInterface
   }
 };
 
-struct UnpackOpInterface
-    : public BufferizableOpInterface::ExternalModel<UnpackOpInterface,
-                                                    sparse_tensor::UnpackOp> {
+struct UnpackOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                               UnpackOpInterface, sparse_tensor::UnpackOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // The output buffer is pre-allocated by the user.
     return false;
@@ -178,9 +186,8 @@ struct UnpackOpInterface
   }
 };
 
-struct InsertOpInterface
-    : public BufferizableOpInterface::ExternalModel<InsertOpInterface,
-                                                    sparse_tensor::InsertOp> {
+struct InsertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
+                               InsertOpInterface, sparse_tensor::InsertOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return true;
@@ -201,7 +208,7 @@ struct InsertOpInterface
 };
 
 struct NumberOfEntriesOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -220,7 +227,7 @@ struct NumberOfEntriesOpInterface
 };
 
 struct ToCoordinatesBufferOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
           ToCoordinatesBufferOpInterface,
           sparse_tensor::ToCoordinatesBufferOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
@@ -242,7 +249,7 @@ struct ToCoordinatesBufferOpInterface
 };
 
 struct ToCoordinatesOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
          ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -263,7 +270,7 @@ struct ToCoordinatesOpInterface
 };
 
 struct ToPositionsOpInterface
-    : public BufferizableOpInterface::ExternalModel<
+    : public SparseBufferizableOpInterfaceExternalModel<
          ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
@@ -284,8 +291,8 @@ struct ToPositionsOpInterface
 };
 
 struct ToValuesOpInterface
-    : public BufferizableOpInterface::ExternalModel<ToValuesOpInterface,
-                                                    sparse_tensor::ToValuesOp> {
+    : public SparseBufferizableOpInterfaceExternalModel<
+          ToValuesOpInterface, sparse_tensor::ToValuesOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
     return true;
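Not shown in this diff is how these external models get attached to the sparse_tensor ops. The sketch below is an assumption following the usual MLIR dialect-extension idiom; the helper name registerSparseTensorBufferizationModels is hypothetical, and the real registration entry point lives in the sparse_tensor transforms library:

#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/DialectRegistry.h"

// Hypothetical registration helper: attaches one external model per op so
// that One-Shot Bufferize sees the error-emitting bufferize() defined above.
void registerSparseTensorBufferizationModels(mlir::DialectRegistry &registry) {
  registry.addExtension(+[](mlir::MLIRContext *ctx,
                            mlir::sparse_tensor::SparseTensorDialect *dialect) {
    mlir::sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
    mlir::sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
    // ... and likewise for the remaining models defined above.
  });
}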


@@ -0,0 +1,12 @@
+// RUN: mlir-opt %s -one-shot-bufferize -verify-diagnostics
+
+#SparseVector = #sparse_tensor.encoding<{
+  lvlTypes = ["compressed"]
+}>
+
+func.func @sparse_tensor_op(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
+  // expected-error @below{{sparse_tensor ops must be bufferized with the sparse compiler}}
+  // expected-error @below{{failed to bufferize op}}
+  %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
+  return %0 : tensor<64xf32, #SparseVector>
+}
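A note on the two expected-error lines: op->emitError produces the op-specific message, and One-Shot Bufferize then reports its generic "failed to bufferize op" error on the same operation, so -verify-diagnostics has to expect both. With this change the RUN line above fails cleanly with these diagnostics instead of crashing; actually lowering sparse tensors to memref remains the job of the sparse compiler pipeline.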