mirror of https://github.com/capstone-engine/llvm-capstone.git
[mlir][sparse] rename sparse_tensor.(un)pack to sparse_tensor.(dis)assemble (#67717)

Pack/Unpack are overloaded in many other places; rename the operations to avoid confusion.
commit 6ca47eb49d
parent 9f2fc88b23
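At the IR level this is purely a rename; operand structure, types, and semantics are unchanged. As a quick orientation before the diff, a minimal sketch of the mapping, reusing the 3x4 COO example from the op documentation updated below (the #COO encoding and the %values/%coordinates constants are assumed to be defined as in that documentation):

```mlir
// Old spelling:
%st = sparse_tensor.pack %values, %coordinates
    : tensor<3xf64>, tensor<3x2xindex> to tensor<3x4xf64, #COO>

// New spelling, same operands and result type:
%st = sparse_tensor.assemble %values, %coordinates
    : tensor<3xf64>, tensor<3x2xindex> to tensor<3x4xf64, #COO>
```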
@@ -53,14 +53,14 @@ def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>,
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
 }
 
-def SparseTensor_PackOp : SparseTensor_Op<"pack", [Pure]>,
+def SparseTensor_AssembleOp : SparseTensor_Op<"assemble", [Pure]>,
     Arguments<(ins TensorOf<[AnyType]>:$values,
                    Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$levels)>,
     Results<(outs AnySparseTensor: $result)> {
   let summary = "Returns a sparse tensor from the given values, levels";
 
   let description = [{
-    Packs the values and per-level coordinate or position arrays into a sparse tensor.
+    Assembles the values and per-level coordinate or position arrays into a sparse tensor.
     The order and types of provided levels must be consistent with the actual storage
     layout of the returned sparse tensor described below.
 
@@ -87,7 +87,7 @@ def SparseTensor_PackOp : SparseTensor_Op<"pack", [Pure]>,
     ```mlir
     %values      = arith.constant dense<[ 1.1, 2.2, 3.3 ]> : tensor<3xf64>
     %coordinates = arith.constant dense<[[0,0], [1,2], [1,3]]> : tensor<3x2xindex>
-    %st = sparse_tensor.pack %values, %coordinates
+    %st = sparse_tensor.assemble %values, %coordinates
         : tensor<3xf64>, tensor<3x2xindex> to tensor<3x4xf64, #COO>
     // yields COO format |1.1, 0.0, 0.0, 0.0|
     //     of 3x4 matrix |0.0, 0.0, 2.2, 3.3|
@@ -102,7 +102,7 @@ def SparseTensor_PackOp : SparseTensor_Op<"pack", [Pure]>,
   let hasVerifier = 1;
 }
 
-def SparseTensor_UnpackOp : SparseTensor_Op<"unpack", [Pure, SameVariadicResultSize]>,
+def SparseTensor_DisassembleOp : SparseTensor_Op<"disassemble", [Pure, SameVariadicResultSize]>,
     Arguments<(ins AnySparseTensor:$tensor,
                    TensorOf<[AnyType]>:$out_values,
                    Variadic<TensorOf<[AnySignlessIntegerOrIndex]>>:$out_levels)>,
@@ -113,7 +113,7 @@ def SparseTensor_UnpackOp : SparseTensor_Op<"unpack", [Pure, SameVariadicResultSize]>,
   let summary = "Returns the (values, coordinates) pair unpacked from the input tensor";
 
   let description = [{
-    The unpack operation is the inverse of `sparse_tensor::pack`. It returns
+    The disassemble operation is the inverse of `sparse_tensor::assemble`. It returns
     the values and per-level position and coordinate array to the user
     from the sparse tensor along with the actual length of the memory used in
     each returned buffer. This operation can be used for returning an
@@ -132,7 +132,7 @@ def SparseTensor_UnpackOp : SparseTensor_Op<"unpack", [Pure, SameVariadicResultSize]>,
     //     of 3x4 matrix |0.0, 0.0, 2.2, 3.3|
     //                   |0.0, 0.0, 0.0, 0.0|
     %v, %p, %c, %v_len, %p_len, %c_len =
-        sparse_tensor.unpack %sp : tensor<3x4xf64, #COO>
+        sparse_tensor.disassemble %sp : tensor<3x4xf64, #COO>
          outs(%od, %op, %oi : tensor<3xf64>, tensor<2xindex>, tensor<3x2xindex>)
          -> tensor<3xf64>, (tensor<2xindex>, tensor<3x2xindex>), index, (index, index)
     // %v = arith.constant dense<[ 1.1, 2.2, 3.3 ]> : tensor<3xf64>
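The inverse direction reads analogously after the rename. A sketch mirroring the disassemble example from the description above (%sp holds the 3x4 COO tensor, and %od, %op, %oi are the user-supplied output buffers assumed from that example):

```mlir
// Formerly sparse_tensor.unpack: returns the values and per-level
// position/coordinate buffers, plus the number of entries actually
// used in each returned buffer.
%v, %p, %c, %v_len, %p_len, %c_len =
    sparse_tensor.disassemble %sp : tensor<3x4xf64, #COO>
    outs(%od, %op, %oi : tensor<3xf64>, tensor<2xindex>, tensor<3x2xindex>)
    -> tensor<3xf64>, (tensor<2xindex>, tensor<3x2xindex>), index, (index, index)
```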
@@ -974,14 +974,14 @@ static LogicalResult verifyPackUnPack(Operation *op, bool requiresStaticShape,
   return success();
 }
 
-LogicalResult PackOp::verify() {
+LogicalResult AssembleOp::verify() {
   const auto valuesTp = getRankedTensorType(getValues());
   const auto lvlsTp = getLevels().getTypes();
   const auto resTp = getSparseTensorType(getResult());
   return verifyPackUnPack(*this, true, resTp, valuesTp, lvlsTp);
 }
 
-LogicalResult UnpackOp::verify() {
+LogicalResult DisassembleOp::verify() {
   if (getOutValues().getType() != getRetValues().getType())
     return emitError("output values and return value type mismatch");
 
@@ -122,11 +122,11 @@ struct NewOpInterface
   bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
 };
 
-struct PackOpInterface
-    : public SparseBufferizableOpInterfaceExternalModel<PackOpInterface,
-                                                        sparse_tensor::PackOp> {
+struct AssembleOpInterface
+    : public SparseBufferizableOpInterfaceExternalModel<
+          AssembleOpInterface, sparse_tensor::AssembleOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
-    // PackOp reuses all the buffers instead of allocating new ones
+    // AssembleOp reuses all the buffers instead of allocating new ones
    return false;
   }
 
@@ -143,7 +143,7 @@ struct PackOpInterface
   AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                       const AnalysisState &state) const {
     assert(op->getNumResults() == 1);
-    // PackOp reuses the input tensors as values/coordinates instead of
+    // AssembleOp reuses the input tensors as values/coordinates instead of
     // creating new ones when packing into a COO format.
     return {{op->getOpResult(0), BufferRelation::Equivalent}};
   }
@@ -154,8 +154,9 @@ struct PackOpInterface
   }
 };
 
-struct UnpackOpInterface : public SparseBufferizableOpInterfaceExternalModel<
-                               UnpackOpInterface, sparse_tensor::UnpackOp> {
+struct DisassembleOpInterface
+    : public SparseBufferizableOpInterfaceExternalModel<
+          DisassembleOpInterface, sparse_tensor::DisassembleOp> {
   bool bufferizesToAllocation(Operation *op, Value value) const {
     // The output buffer is pre-allocated by the user.
     return false;
@@ -326,8 +327,8 @@ void mlir::sparse_tensor::registerBufferizableOpInterfaceExternalModels(
     sparse_tensor::InsertOp::attachInterface<InsertOpInterface>(*ctx);
     sparse_tensor::NumberOfEntriesOp::attachInterface<
         NumberOfEntriesOpInterface>(*ctx);
-    sparse_tensor::PackOp::attachInterface<PackOpInterface>(*ctx);
-    sparse_tensor::UnpackOp::attachInterface<UnpackOpInterface>(*ctx);
+    sparse_tensor::AssembleOp::attachInterface<AssembleOpInterface>(*ctx);
+    sparse_tensor::DisassembleOp::attachInterface<DisassembleOpInterface>(*ctx);
     sparse_tensor::ToCoordinatesBufferOp::attachInterface<
         ToCoordinatesBufferOpInterface>(*ctx);
     sparse_tensor::ToCoordinatesOp::attachInterface<ToCoordinatesOpInterface>(
@@ -795,10 +795,10 @@ rewriteSpGEMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
   Value rowC = e1.getResult(0);
   token = e1.getAsyncToken();
   auto e2 = genAllocBuffer(rewriter, loc, cTp.getCrdType(), zero, token);
   Value colC = e2.getResult(0); // no free needed
   token = e2.getAsyncToken();
   auto e3 = genAllocBuffer(rewriter, loc, dnCType, zero, token);
   Value valC = e3.getResult(0); // no free needed
   token = e3.getAsyncToken();
   Operation *spGenC =
       genSpMat(rewriter, loc, spmatHandleTp, tokenTp, token, szm, szn, zero,
@@ -900,7 +900,8 @@ rewriteSpGEMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
   Value vt = rewriter.create<bufferization::ToTensorOp>(loc, valH);
   Value rt = rewriter.create<bufferization::ToTensorOp>(loc, rowH);
   Value ct = rewriter.create<bufferization::ToTensorOp>(loc, colH);
-  rewriter.replaceOpWithNewOp<PackOp>(op, c.getType(), vt, ValueRange{rt, ct});
+  rewriter.replaceOpWithNewOp<AssembleOp>(op, c.getType(), vt,
+                                          ValueRange{rt, ct});
   return success();
 }
 
@@ -1244,10 +1244,10 @@ public:
   }
 };
 
-struct SparsePackOpConverter : public OpConversionPattern<PackOp> {
+struct SparseAssembleOpConverter : public OpConversionPattern<AssembleOp> {
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(PackOp op, OpAdaptor adaptor,
+  matchAndRewrite(AssembleOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     Location loc = op.getLoc();
     const auto stt = getSparseTensorType(op.getResult());
@@ -1347,13 +1347,15 @@ struct SparsePackOpConverter : public OpConversionPattern<PackOp> {
   }
 };
 
-struct SparseUnpackOpConverter : public OpConversionPattern<UnpackOp> {
+struct SparseDisassembleOpConverter
+    : public OpConversionPattern<DisassembleOp> {
   using OpConversionPattern::OpConversionPattern;
-  SparseUnpackOpConverter(TypeConverter &typeConverter, MLIRContext *context)
+  SparseDisassembleOpConverter(TypeConverter &typeConverter,
+                               MLIRContext *context)
      : OpConversionPattern(typeConverter, context) {}
 
   LogicalResult
-  matchAndRewrite(UnpackOp op, OpAdaptor adaptor,
+  matchAndRewrite(DisassembleOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     auto desc = getDescriptorFromTensorTuple(adaptor.getTensor());
     Location loc = op.getLoc();
@@ -1571,7 +1573,7 @@ struct SparseNewOpConverter : public OpConversionPattern<NewOp> {
 void mlir::populateSparseTensorCodegenPatterns(
     TypeConverter &typeConverter, RewritePatternSet &patterns,
     bool createSparseDeallocs, bool enableBufferInitialization) {
-  patterns.add<SparsePackOpConverter, SparseUnpackOpConverter,
+  patterns.add<SparseAssembleOpConverter, SparseDisassembleOpConverter,
               SparseReturnConverter, SparseCallConverter, SparseDimOpConverter,
               SparseCastConverter, SparseExtractSliceConverter,
               SparseTensorLoadConverter, SparseExpandConverter,
@@ -1493,15 +1493,15 @@ public:
 };
 
 /// Sparse conversion rule for the sparse_tensor.pack operator.
-class SparseTensorPackConverter : public OpConversionPattern<PackOp> {
+class SparseTensorAssembleConverter : public OpConversionPattern<AssembleOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(PackOp op, OpAdaptor adaptor,
+  matchAndRewrite(AssembleOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     const Location loc = op->getLoc();
     const auto dstTp = getSparseTensorType(op.getResult());
-    // PackOps always returns a static shaped tensor result.
+    // AssembleOps always return a statically shaped tensor result.
     assert(dstTp.hasStaticDimShape());
     SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, dstTp);
     Value dst =
@@ -1546,7 +1546,7 @@ void mlir::populateSparseTensorConversionPatterns(
            SparseTensorToValuesConverter, SparseNumberOfEntriesConverter,
            SparseTensorLoadConverter, SparseTensorInsertConverter,
            SparseTensorExpandConverter, SparseTensorCompressConverter,
-           SparseTensorOutConverter, SparseTensorPackConverter>(
+           SparseTensorOutConverter, SparseTensorAssembleConverter>(
          typeConverter, patterns.getContext());
   patterns.add<SparseTensorConvertConverter>(typeConverter,
                                              patterns.getContext(), options);
@@ -86,7 +86,7 @@
 // CHECK:     %[[VAL_a2:.*]] = bufferization.to_tensor %[[VAL_83]] : memref<?xf32>
 // CHECK:     %[[VAL_a3:.*]] = bufferization.to_tensor %[[VAL_81]] : memref<?xindex>
 // CHECK:     %[[VAL_a4:.*]] = bufferization.to_tensor %[[VAL_82]] : memref<?xindex>
-// CHECK:     %[[VAL_a5:.*]] = sparse_tensor.pack %[[VAL_a2]], %[[VAL_a3]], %[[VAL_a4]] : tensor<?xf32>, tensor<?xindex>, tensor<?xindex> to tensor<8x8xf32, #{{.*}}>
+// CHECK:     %[[VAL_a5:.*]] = sparse_tensor.assemble %[[VAL_a2]], %[[VAL_a3]], %[[VAL_a4]] : tensor<?xf32>, tensor<?xindex>, tensor<?xindex> to tensor<8x8xf32, #{{.*}}>
 // CHECK:     return %[[VAL_a5]] : tensor<8x8xf32, #{{.*}}>
 // CHECK:   }
 func.func @matmulCSR(%A: tensor<8x8xf32, #CSR>,
@@ -13,7 +13,7 @@ func.func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
 func.func @non_static_pack_ret(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
                             -> tensor<?xf64, #SparseVector> {
   // expected-error@+1 {{the sparse-tensor must have static shape}}
-  %0 = sparse_tensor.pack %values, %pos, %coordinates
+  %0 = sparse_tensor.assemble %values, %pos, %coordinates
      : tensor<6xf64>, tensor<2xi32>, tensor<6x1xi32> to tensor<?xf64, #SparseVector>
   return %0 : tensor<?xf64, #SparseVector>
 }
@@ -25,7 +25,7 @@ func.func @non_static_pack_ret(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
 func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
                             -> tensor<100xf32, #SparseVector> {
   // expected-error@+1 {{input/output element-types don't match}}
-  %0 = sparse_tensor.pack %values, %pos, %coordinates
+  %0 = sparse_tensor.assemble %values, %pos, %coordinates
      : tensor<6xf64>, tensor<2xi32>, tensor<6x1xi32> to tensor<100xf32, #SparseVector>
   return %0 : tensor<100xf32, #SparseVector>
 }
@@ -37,7 +37,7 @@ func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
 func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
                             -> tensor<100x2xf64, #SparseVector> {
   // expected-error@+1 {{input/output trailing COO level-ranks don't match}}
-  %0 = sparse_tensor.pack %values, %pos, %coordinates
+  %0 = sparse_tensor.assemble %values, %pos, %coordinates
      : tensor<6xf64>, tensor<2xi32>, tensor<6x3xi32> to tensor<100x2xf64, #SparseVector>
   return %0 : tensor<100x2xf64, #SparseVector>
 }
@@ -49,7 +49,7 @@ func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
 func.func @invalid_pack_mis_position(%values: tensor<6xf64>, %coordinates: tensor<6xi32>)
                             -> tensor<2x100xf64, #CSR> {
   // expected-error@+1 {{inconsistent number of fields between input/output}}
-  %0 = sparse_tensor.pack %values, %coordinates
+  %0 = sparse_tensor.assemble %values, %coordinates
      : tensor<6xf64>, tensor<6xi32> to tensor<2x100xf64, #CSR>
   return %0 : tensor<2x100xf64, #CSR>
 }
@@ -60,7 +60,7 @@ func.func @invalid_pack_mis_position(%values: tensor<6xf64>, %coordinates: tensor<6xi32>)
 
 func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>) {
   // expected-error@+1 {{input/output element-types don't match}}
-  %rv, %rp, %rc, %vl, %pl, %cl = sparse_tensor.unpack %sp : tensor<100xf32, #SparseVector>
+  %rv, %rp, %rc, %vl, %pl, %cl = sparse_tensor.disassemble %sp : tensor<100xf32, #SparseVector>
                                  outs(%values, %pos, %coordinates : tensor<6xf64>, tensor<2xi32>, tensor<6x1xi32>)
                                  -> tensor<6xf64>, (tensor<2xi32>, tensor<6x1xi32>), index, (index, index)
   return
@@ -72,7 +72,7 @@ func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>) {
 
 func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) {
   // expected-error@+1 {{input/output trailing COO level-ranks don't match}}
-  %rv, %rp, %rc, %vl, %pl, %cl = sparse_tensor.unpack %sp : tensor<100x2xf64, #SparseVector>
+  %rv, %rp, %rc, %vl, %pl, %cl = sparse_tensor.disassemble %sp : tensor<100x2xf64, #SparseVector>
                                  outs(%values, %pos, %coordinates : tensor<6xf64>, tensor<2xi32>, tensor<6x3xi32>)
                                  -> tensor<6xf64>, (tensor<2xi32>, tensor<6x3xi32>), index, (index, index)
   return
@@ -84,7 +84,7 @@ func.func @invalid_unpack_type(%sp: tensor<100x2xf64, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>) {
 
 func.func @invalid_unpack_mis_position(%sp: tensor<2x100xf64, #CSR>, %values: tensor<6xf64>, %coordinates: tensor<6xi32>) {
   // expected-error@+1 {{inconsistent number of fields between input/output}}
-  %rv, %rc, %vl, %pl = sparse_tensor.unpack %sp : tensor<2x100xf64, #CSR>
+  %rv, %rc, %vl, %pl = sparse_tensor.disassemble %sp : tensor<2x100xf64, #CSR>
                        outs(%values, %coordinates : tensor<6xf64>, tensor<6xi32>)
                        -> tensor<6xf64>, (tensor<6xi32>), index, (index)
   return
@@ -35,7 +35,7 @@ func.func @foo(%arg0: tensor<3xf64> {bufferization.writable = false},
   //
   // Pack the buffers into a sparse tensor.
   //
-  %pack = sparse_tensor.pack %arg0, %arg2, %arg1
+  %pack = sparse_tensor.assemble %arg0, %arg2, %arg1
     : tensor<3xf64>,
      tensor<11xi32>,
      tensor<3xi32> to tensor<10x10xf64, #CSR>
@@ -76,7 +76,7 @@ func.func @bar(%arg0: tensor<3xf64> {bufferization.writable = true},
   //
   // Pack the buffers into a sparse tensor.
   //
-  %pack = sparse_tensor.pack %arg0, %arg2, %arg1
+  %pack = sparse_tensor.assemble %arg0, %arg2, %arg1
     : tensor<3xf64>,
      tensor<11xi32>,
      tensor<3xi32> to tensor<10x10xf64, #CSR>
@@ -19,11 +19,11 @@ func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
 // CHECK-SAME:  %[[D:.*]]: tensor<6xf64>,
 // CHECK-SAME:  %[[P:.*]]: tensor<2xi32>,
 // CHECK-SAME:  %[[I:.*]]: tensor<6x1xi32>)
-// CHECK:       %[[R:.*]] = sparse_tensor.pack %[[D]], %[[P]], %[[I]]
+// CHECK:       %[[R:.*]] = sparse_tensor.assemble %[[D]], %[[P]], %[[I]]
 // CHECK:       return %[[R]] : tensor<100xf64, #{{.*}}>
 func.func @sparse_pack(%data: tensor<6xf64>, %pos: tensor<2xi32>, %index: tensor<6x1xi32>)
                      -> tensor<100xf64, #SparseVector> {
-  %0 = sparse_tensor.pack %data, %pos, %index : tensor<6xf64>, tensor<2xi32>, tensor<6x1xi32>
+  %0 = sparse_tensor.assemble %data, %pos, %index : tensor<6xf64>, tensor<2xi32>, tensor<6x1xi32>
                                      to tensor<100xf64, #SparseVector>
   return %0 : tensor<100xf64, #SparseVector>
 }
@@ -36,14 +36,14 @@ func.func @sparse_pack(%data: tensor<6xf64>, %pos: tensor<2xi32>, %index: tensor<6x1xi32>)
 // CHECK-SAME:  %[[OD:.*]]: tensor<6xf64>
 // CHECK-SAME:  %[[OP:.*]]: tensor<2xindex>
 // CHECK-SAME:  %[[OI:.*]]: tensor<6x1xi32>
-// CHECK:       %[[D:.*]], %[[P:.*]]:2, %[[DL:.*]], %[[PL:.*]]:2 = sparse_tensor.unpack %[[T]]
+// CHECK:       %[[D:.*]], %[[P:.*]]:2, %[[DL:.*]], %[[PL:.*]]:2 = sparse_tensor.disassemble %[[T]]
 // CHECK:       return %[[D]], %[[P]]#0, %[[P]]#1
 func.func @sparse_unpack(%sp : tensor<100xf64, #SparseVector>,
                          %od : tensor<6xf64>,
                          %op : tensor<2xindex>,
                          %oi : tensor<6x1xi32>)
                        -> (tensor<6xf64>, tensor<2xindex>, tensor<6x1xi32>) {
-  %rd, %rp, %ri, %vl, %pl, %cl = sparse_tensor.unpack %sp : tensor<100xf64, #SparseVector>
+  %rd, %rp, %ri, %vl, %pl, %cl = sparse_tensor.disassemble %sp : tensor<100xf64, #SparseVector>
                                  outs(%od, %op, %oi : tensor<6xf64>, tensor<2xindex>, tensor<6x1xi32>)
                                  -> tensor<6xf64>, (tensor<2xindex>, tensor<6x1xi32>), index, (index, index)
   return %rd, %rp, %ri : tensor<6xf64>, tensor<2xindex>, tensor<6x1xi32>
@@ -31,7 +31,7 @@
 // CHECK:         }
 func.func @sparse_pack(%values: tensor<6xf64>, %pos:tensor<2xindex>, %coordinates: tensor<6x2xi32>)
                      -> tensor<100x100xf64, #COO> {
-  %0 = sparse_tensor.pack %values, %pos, %coordinates
+  %0 = sparse_tensor.assemble %values, %pos, %coordinates
      : tensor<6xf64>, tensor<2xindex>, tensor<6x2xi32> to tensor<100x100xf64, #COO>
   return %0 : tensor<100x100xf64, #COO>
 }
@@ -70,7 +70,7 @@ func.func @sparse_unpack(%sp : tensor<100x100xf64, #COO>,
                          %op : tensor<2xindex>,
                          %oi : tensor<6x2xi32>)
                        -> (tensor<6xf64>, tensor<2xindex>, tensor<6x2xi32>) {
-  %rd, %rp, %ri, %dl, %pl, %il = sparse_tensor.unpack %sp : tensor<100x100xf64, #COO>
+  %rd, %rp, %ri, %dl, %pl, %il = sparse_tensor.disassemble %sp : tensor<100x100xf64, #COO>
                                  outs(%od, %op, %oi : tensor<6xf64>, tensor<2xindex>, tensor<6x2xi32>)
                                  -> tensor<6xf64>, (tensor<2xindex>, tensor<6x2xi32>), index, (index, index)
   return %rd, %rp, %ri : tensor<6xf64>, tensor<2xindex>, tensor<6x2xi32>
@@ -24,7 +24,7 @@
 // REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false vl=4
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
-// TODO: support sparse_tensor.unpack on libgen path.
+// TODO: support sparse_tensor.disassemble on libgen path.
 
 #SortedCOO = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
@@ -81,9 +81,9 @@ module {
         [ 7, 8]]
     > : tensor<3x2xi32>
 
-    %s4 = sparse_tensor.pack %data, %pos, %index : tensor<3xf64>, tensor<2xindex>, tensor<3x2xindex>
+    %s4 = sparse_tensor.assemble %data, %pos, %index : tensor<3xf64>, tensor<2xindex>, tensor<3x2xindex>
                                           to tensor<10x10xf64, #SortedCOO>
-    %s5= sparse_tensor.pack %data, %pos32, %index32 : tensor<3xf64>, tensor<2xi32>, tensor<3x2xi32>
+    %s5= sparse_tensor.assemble %data, %pos32, %index32 : tensor<3xf64>, tensor<2xi32>, tensor<3x2xi32>
                                           to tensor<10x10xf64, #SortedCOOI32>
 
     %csr_data = arith.constant dense<
@@ -97,7 +97,7 @@ module {
     %csr_index32 = arith.constant dense<
        [1, 0, 1]
     > : tensor<3xi32>
-    %csr= sparse_tensor.pack %csr_data, %csr_pos32, %csr_index32 : tensor<4xf64>, tensor<3xi32>, tensor<3xi32>
+    %csr= sparse_tensor.assemble %csr_data, %csr_pos32, %csr_index32 : tensor<4xf64>, tensor<3xi32>, tensor<3xi32>
                                           to tensor<2x2xf64, #CSR>
 
     %bdata = arith.constant dense<
@@ -116,7 +116,7 @@ module {
        [ 4, 2],
        [ 10, 10]]
     > : tensor<6x2xindex>
-    %bs = sparse_tensor.pack %bdata, %bpos, %bindex :
+    %bs = sparse_tensor.assemble %bdata, %bpos, %bindex :
           tensor<6xf64>, tensor<4xindex>, tensor<6x2xindex> to tensor<2x10x10xf64, #BCOO>
 
     // CHECK:1
@@ -176,7 +176,7 @@ module {
     %d_csr = tensor.empty() : tensor<4xf64>
     %p_csr = tensor.empty() : tensor<3xi32>
     %i_csr = tensor.empty() : tensor<3xi32>
-    %rd_csr, %rp_csr, %ri_csr, %ld_csr, %lp_csr, %li_csr = sparse_tensor.unpack %csr : tensor<2x2xf64, #CSR>
+    %rd_csr, %rp_csr, %ri_csr, %ld_csr, %lp_csr, %li_csr = sparse_tensor.disassemble %csr : tensor<2x2xf64, #CSR>
                  outs(%d_csr, %p_csr, %i_csr : tensor<4xf64>, tensor<3xi32>, tensor<3xi32>)
                  -> tensor<4xf64>, (tensor<3xi32>, tensor<3xi32>), index, (i32, i64)
 
@@ -201,7 +201,7 @@ module {
     %od = tensor.empty() : tensor<3xf64>
     %op = tensor.empty() : tensor<2xi32>
     %oi = tensor.empty() : tensor<3x2xi32>
-    %d, %p, %i, %dl, %pl, %il = sparse_tensor.unpack %s5 : tensor<10x10xf64, #SortedCOOI32>
+    %d, %p, %i, %dl, %pl, %il = sparse_tensor.disassemble %s5 : tensor<10x10xf64, #SortedCOOI32>
                  outs(%od, %op, %oi : tensor<3xf64>, tensor<2xi32>, tensor<3x2xi32>)
                  -> tensor<3xf64>, (tensor<2xi32>, tensor<3x2xi32>), index, (i32, i64)
 
@@ -217,7 +217,7 @@ module {
     %bod = tensor.empty() : tensor<6xf64>
     %bop = tensor.empty() : tensor<4xindex>
     %boi = tensor.empty() : tensor<6x2xindex>
-    %bd, %bp, %bi, %ld, %lp, %li = sparse_tensor.unpack %bs : tensor<2x10x10xf64, #BCOO>
+    %bd, %bp, %bi, %ld, %lp, %li = sparse_tensor.disassemble %bs : tensor<2x10x10xf64, #BCOO>
                  outs(%bod, %bop, %boi : tensor<6xf64>, tensor<4xindex>, tensor<6x2xindex>)
                  -> tensor<6xf64>, (tensor<4xindex>, tensor<6x2xindex>), index, (i32, tensor<i64>)
 
@@ -24,7 +24,7 @@
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
 // TODO: This is considered to be a short-lived test and should be merged with sparse_pack.mlir
-// after sparse_tensor.unpack is supported on libgen path.
+// after sparse_tensor.disassemble is supported on libgen path.
 
 #SortedCOO = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
@@ -82,9 +82,9 @@ module {
        [ 7, 8]]
     > : tensor<3x2xi32>
 
-    %s4 = sparse_tensor.pack %data, %pos, %index : tensor<3xf64>, tensor<2xindex>, tensor<3x2xindex>
+    %s4 = sparse_tensor.assemble %data, %pos, %index : tensor<3xf64>, tensor<2xindex>, tensor<3x2xindex>
                                           to tensor<10x10xf64, #SortedCOO>
-    %s5= sparse_tensor.pack %data, %pos32, %index32 : tensor<3xf64>, tensor<2xi32>, tensor<3x2xi32>
+    %s5= sparse_tensor.assemble %data, %pos32, %index32 : tensor<3xf64>, tensor<2xi32>, tensor<3x2xi32>
                                           to tensor<10x10xf64, #SortedCOOI32>
 
     %csr_data = arith.constant dense<
@@ -98,7 +98,7 @@ module {
     %csr_index32 = arith.constant dense<
        [1, 0, 1]
     > : tensor<3xi32>
-    %csr= sparse_tensor.pack %csr_data, %csr_pos32, %csr_index32 : tensor<4xf64>, tensor<3xi32>, tensor<3xi32>
+    %csr= sparse_tensor.assemble %csr_data, %csr_pos32, %csr_index32 : tensor<4xf64>, tensor<3xi32>, tensor<3xi32>
                                           to tensor<2x2xf64, #CSR>
 
     // CHECK:1