[mlir][sparse] support sparse bufferization.alloc_tensor with copy argument.
Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D147358
Parent: 94a74b9dbb
Commit: 7b86f7c5d4
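In short, sparse tensor codegen now lowers bufferization.alloc_tensor ops that carry a copy argument instead of bailing out with "tensor copy not implemented". A minimal example of the now-supported input IR, mirroring the new test below (the function name @alloc_copy is illustrative; the #CSR encoding is the one used in the test):

    #CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"]}>

    func.func @alloc_copy(%t: tensor<2x2xf32, #CSR>) -> tensor<2x2xf32, #CSR> {
      // Allocate a new sparse tensor initialized as a deep copy of %t.
      %0 = bufferization.alloc_tensor() copy(%t) : tensor<2x2xf32, #CSR>
      return %0 : tensor<2x2xf32, #CSR>
    }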
@@ -749,11 +749,29 @@ public:
     const auto resType = getSparseTensorType(op);
     if (!resType.hasEncoding())
       return failure();
-    if (op.getCopy())
-      return rewriter.notifyMatchFailure(op, "tensor copy not implemented");

     // Construct allocation for each field.
     const Location loc = op.getLoc();
+    if (op.getCopy()) {
+      auto desc = getDescriptorFromTensorTuple(adaptor.getCopy());
+      SmallVector<Value> fields;
+      fields.reserve(desc.getNumFields());
+      // Memcpy on memref fields.
+      for (auto field : desc.getMemRefFields()) {
+        auto memrefTp = field.getType().cast<MemRefType>();
+        auto size = rewriter.create<memref::DimOp>(loc, field, 0);
+        auto copied =
+            rewriter.create<memref::AllocOp>(loc, memrefTp, ValueRange{size});
+        rewriter.create<memref::CopyOp>(loc, field, copied);
+        fields.push_back(copied);
+      }
+      // Reuses specifier.
+      fields.push_back(desc.getSpecifier());
+      assert(fields.size() == desc.getNumFields());
+      rewriter.replaceOp(op, genTuple(rewriter, loc, resType, fields));
+      return success();
+    }
+
     const Value sizeHint = op.getSizeHint();
     const ValueRange dynSizes = adaptor.getDynamicSizes();
     const size_t found = dynSizes.size();
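The new lowering handles the copy argument by deep-copying every memref field of the source sparse tensor's storage (the positions, coordinates, and values buffers) and pushing the source's storage specifier through unchanged. Per field, this emits a dim/alloc/copy sequence; a sketch of the IR produced for one index field (value names illustrative; the exact output is pinned down by the CHECK lines of the new test below):

    %c0 = arith.constant 0 : index
    // Runtime size of the source storage field.
    %sz = memref.dim %field, %c0 : memref<?xindex>
    // Fresh buffer of matching size, filled by an element-wise copy.
    %new = memref.alloc(%sz) : memref<?xindex>
    memref.copy %field, %new : memref<?xindex> to memref<?xindex>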
mlir/test/Dialect/SparseTensor/codegen_sparse_alloc.mlir (new file, 44 lines)
@@ -0,0 +1,44 @@
+// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s
+
+#CSR = #sparse_tensor.encoding<{ dimLevelType = ["dense", "compressed"]}>
+#COO = #sparse_tensor.encoding<{ dimLevelType = ["compressed-nu", "singleton"]}>
+
+// CHECK-LABEL: func.func @sparse_alloc_copy_CSR(
+// CHECK-SAME:  %[[VAL_0:.*0]]: memref<?xindex>,
+// CHECK-SAME:  %[[VAL_1:.*1]]: memref<?xindex>,
+// CHECK-SAME:  %[[VAL_2:.*2]]: memref<?xf32>,
+// CHECK-SAME:  %[[VAL_3:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#{{.*}}>) {
+// CHECK:       %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK:       %[[VAL_5:.*]] = memref.dim %[[VAL_0]], %[[VAL_4]] : memref<?xindex>
+// CHECK:       %[[VAL_6:.*]] = memref.alloc(%[[VAL_5]]) : memref<?xindex>
+// CHECK:       memref.copy %[[VAL_0]], %[[VAL_6]] : memref<?xindex> to memref<?xindex>
+// CHECK:       %[[VAL_7:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : memref<?xindex>
+// CHECK:       %[[VAL_8:.*]] = memref.alloc(%[[VAL_7]]) : memref<?xindex>
+// CHECK:       memref.copy %[[VAL_1]], %[[VAL_8]] : memref<?xindex> to memref<?xindex>
+// CHECK:       %[[VAL_9:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : memref<?xf32>
+// CHECK:       %[[VAL_10:.*]] = memref.alloc(%[[VAL_9]]) : memref<?xf32>
+// CHECK:       memref.copy %[[VAL_2]], %[[VAL_10]] : memref<?xf32> to memref<?xf32>
+func.func @sparse_alloc_copy_CSR(%arg0: tensor<2x2xf32, #CSR>) -> tensor<2x2xf32, #CSR> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) : tensor<2x2xf32, #CSR>
+  return %0 : tensor<2x2xf32, #CSR>
+}
+
+// CHECK-LABEL: func.func @sparse_alloc_copy_COO(
+// CHECK-SAME:  %[[VAL_0:.*0]]: memref<?xindex>,
+// CHECK-SAME:  %[[VAL_1:.*1]]: memref<?xindex>,
+// CHECK-SAME:  %[[VAL_2:.*2]]: memref<?xf32>,
+// CHECK-SAME:  %[[VAL_3:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>) -> (memref<?xindex>, memref<?xindex>, memref<?xf32>, !sparse_tensor.storage_specifier<#{{.*}}>) {
+// CHECK:       %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK:       %[[VAL_5:.*]] = memref.dim %[[VAL_0]], %[[VAL_4]] : memref<?xindex>
+// CHECK:       %[[VAL_6:.*]] = memref.alloc(%[[VAL_5]]) : memref<?xindex>
+// CHECK:       memref.copy %[[VAL_0]], %[[VAL_6]] : memref<?xindex> to memref<?xindex>
+// CHECK:       %[[VAL_7:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : memref<?xindex>
+// CHECK:       %[[VAL_8:.*]] = memref.alloc(%[[VAL_7]]) : memref<?xindex>
+// CHECK:       memref.copy %[[VAL_1]], %[[VAL_8]] : memref<?xindex> to memref<?xindex>
+// CHECK:       %[[VAL_9:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : memref<?xf32>
+// CHECK:       %[[VAL_10:.*]] = memref.alloc(%[[VAL_9]]) : memref<?xf32>
+// CHECK:       memref.copy %[[VAL_2]], %[[VAL_10]] : memref<?xf32> to memref<?xf32>
+func.func @sparse_alloc_copy_COO(%arg0: tensor<2x2xf32, #COO>) -> tensor<2x2xf32, #COO> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) : tensor<2x2xf32, #COO>
+  return %0 : tensor<2x2xf32, #COO>
+}