llvm-capstone/mlir/lib/Bindings/Python/DialectSparseTensor.cpp
Stella Laurenzo f13893f66a [mlir][Python] Upstream the PybindAdaptors.h helpers and use it to implement sparse_tensor.encoding.
* The PybindAdaptors.h file has been evolving across different sub-projects (npcomp, circt) and has been used successfully for out-of-tree Python API interop/extensions and for defining custom types.
* Since sparse_tensor.encoding is the first in-tree custom attribute we are supporting, it seemed like the right time to upstream this header and use it to define the attribute in a way that works for both in-tree and out-of-tree use (previously, I had not wanted to upstream dead code that was not used in-tree).
* Adapted the circt version of `mlir_type_subclass` and also provided an `mlir_attribute_subclass`. Once this has gotten a bit of mileage, I would like to transition the builtin types/attributes to this mechanism and delete the old in-tree-only `PyConcreteType` and `PyConcreteAttribute` template helpers (which cannot work reliably out of tree because they depend on internals).
* Added support for defaulting the MlirContext if none is passed, so that we can support the same idioms as the in-tree versions (see the Python usage sketch below).

There is quite a bit going on here, and I can split it up if needed, but I would prefer to keep the first use and the header together, so I am sending it out as one patch.

Differential Revision: https://reviews.llvm.org/D102144
2021-05-10 17:15:43 +00:00
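
For orientation, here is a minimal Python sketch of how the attribute defined in this file is expected to be used. The module path mlir.dialects.sparse_tensor, the argument order, and the default-context behavior are assumptions inferred from the commit description and the bindings below, not verified against the in-tree tests.

# Minimal usage sketch (assumed module path: mlir.dialects.sparse_tensor).
from mlir.ir import Context
from mlir.dialects import sparse_tensor as st

with Context():
    # DimLevelType mirrors the py::enum_ registered in the C++ below.
    levels = [st.DimLevelType.dense, st.DimLevelType.compressed]
    # `context` is omitted here and is expected to default to the enclosing
    # context, per the "defaulting the MlirContext" bullet above.
    enc = st.EncodingAttr.get(levels, None, 16, 32)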


//===- DialectSparseTensor.cpp - 'sparse_tensor' dialect submodule --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Dialects.h"
#include "mlir-c/Dialect/SparseTensor.h"
#include "mlir-c/IR.h"
#include "mlir/Bindings/Python/PybindAdaptors.h"

namespace py = pybind11;
using namespace llvm;
using namespace mlir;
using namespace mlir::python::adaptors;

void mlir::python::populateDialectSparseTensorSubmodule(
    py::module m, const py::module &irModule) {
  auto attributeClass = irModule.attr("Attribute");

  py::enum_<MlirSparseTensorDimLevelType>(m, "DimLevelType")
      .value("dense", MLIR_SPARSE_TENSOR_DIM_LEVEL_DENSE)
      .value("compressed", MLIR_SPARSE_TENSOR_DIM_LEVEL_COMPRESSED)
      .value("singleton", MLIR_SPARSE_TENSOR_DIM_LEVEL_SINGLETON);

  mlir_attribute_subclass(m, "EncodingAttr",
                          mlirAttributeIsASparseTensorEncodingAttr,
                          attributeClass)
      .def_classmethod(
          "get",
          [](py::object cls,
             std::vector<MlirSparseTensorDimLevelType> dimLevelTypes,
             llvm::Optional<MlirAffineMap> dimOrdering, int pointerBitWidth,
             int indexBitWidth, MlirContext context) {
            return cls(mlirSparseTensorEncodingAttrGet(
                context, dimLevelTypes.size(), dimLevelTypes.data(),
                dimOrdering ? *dimOrdering : MlirAffineMap{nullptr},
                pointerBitWidth, indexBitWidth));
          },
          py::arg("cls"), py::arg("dim_level_types"), py::arg("dim_ordering"),
          py::arg("pointer_bit_width"), py::arg("index_bit_width"),
          py::arg("context") = py::none(),
          "Gets a sparse_tensor.encoding from parameters.")
      .def_property_readonly(
          "dim_level_types",
          [](MlirAttribute self) {
            std::vector<MlirSparseTensorDimLevelType> ret;
            for (int i = 0,
                     e = mlirSparseTensorEncodingGetNumDimLevelTypes(self);
                 i < e; ++i)
              ret.push_back(
                  mlirSparseTensorEncodingAttrGetDimLevelType(self, i));
            return ret;
          })
      .def_property_readonly(
          "dim_ordering",
          [](MlirAttribute self) -> llvm::Optional<MlirAffineMap> {
            MlirAffineMap ret =
                mlirSparseTensorEncodingAttrGetDimOrdering(self);
            if (mlirAffineMapIsNull(ret))
              return {};
            return ret;
          })
      .def_property_readonly(
          "pointer_bit_width",
          [](MlirAttribute self) {
            return mlirSparseTensorEncodingAttrGetPointerBitWidth(self);
          })
      .def_property_readonly("index_bit_width", [](MlirAttribute self) {
        return mlirSparseTensorEncodingAttrGetIndexBitWidth(self);
      });
}
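
To complement the bindings above, a small self-contained round-trip sketch reading the parameters back through the read-only properties. Again, the module path and Python-side types are assumptions; a null dim_ordering is expected to surface as None via the llvm::Optional return.

# Round-trip sketch for the read-only properties (assumed module path).
from mlir.ir import Context
from mlir.dialects import sparse_tensor as st

with Context():
    enc = st.EncodingAttr.get(
        [st.DimLevelType.dense, st.DimLevelType.compressed], None, 16, 32)
    assert enc.pointer_bit_width == 16
    assert enc.index_bit_width == 32
    assert enc.dim_ordering is None  # null MlirAffineMap surfaces as None
    print(enc.dim_level_types)  # expected: the two DimLevelType values above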