Mirror of https://github.com/capstone-engine/llvm-capstone.git, synced 2024-12-14 11:39:35 +00:00.
[mlir][sparse] Print new syntax (#68130)
Printing changes from `#sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>` to the new `map = (d0) -> (d0 : compressed)` syntax. Level properties, ELL, and slices are also supported.
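For example, a CSC encoding that previously printed as

    #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>

now prints as

    #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }>

Both forms are taken verbatim from the test updates in this diff.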
commit 6280e23124
parent 7794e16b49
@@ -215,29 +215,29 @@ constexpr const char *toMLIRString(DimLevelType dlt) {
   case DimLevelType::Compressed:
     return "compressed";
   case DimLevelType::CompressedNu:
-    return "compressed_nu";
+    return "compressed(nonunique)";
   case DimLevelType::CompressedNo:
-    return "compressed_no";
+    return "compressed(nonordered)";
   case DimLevelType::CompressedNuNo:
-    return "compressed_nu_no";
+    return "compressed(nonunique, nonordered)";
   case DimLevelType::Singleton:
     return "singleton";
   case DimLevelType::SingletonNu:
-    return "singleton_nu";
+    return "singleton(nonunique)";
   case DimLevelType::SingletonNo:
-    return "singleton_no";
+    return "singleton(nonordered)";
   case DimLevelType::SingletonNuNo:
-    return "singleton_nu_no";
+    return "singleton(nonunique, nonordered)";
   case DimLevelType::LooseCompressed:
     return "loose_compressed";
   case DimLevelType::LooseCompressedNu:
-    return "loose_compressed_nu";
+    return "loose_compressed(nonunique)";
   case DimLevelType::LooseCompressedNo:
-    return "loose_compressed_no";
+    return "loose_compressed(nonordered)";
   case DimLevelType::LooseCompressedNuNo:
-    return "loose_compressed_nu_no";
+    return "loose_compressed(nonunique, nonordered)";
   case DimLevelType::TwoOutOfFour:
-    return "compressed24";
+    return "block2_4";
   }
   return "";
 }
@@ -422,6 +422,14 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     std::optional<uint64_t> getStaticLvlSliceOffset(::mlir::sparse_tensor::Level lvl) const;
     std::optional<uint64_t> getStaticLvlSliceSize(::mlir::sparse_tensor::Level lvl) const;
     std::optional<uint64_t> getStaticLvlSliceStride(::mlir::sparse_tensor::Level lvl) const;
+
+    //
+    // Printing methods.
+    //
+
+    void printSymbols(AffineMap &map, AsmPrinter &printer) const;
+    void printDimensions(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr> dimSlices) const;
+    void printLevels(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::DimLevelType> lvlTypes) const;
   }];

   let genVerifyDecl = 1;
@@ -586,33 +586,69 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
 }

 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
-  // Print the struct-like storage in dictionary fashion.
-  printer << "<{ lvlTypes = [ ";
-  llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) {
-    printer << "\"" << toMLIRString(dlt) << "\"";
-  });
-  printer << " ]";
+  auto map = static_cast<AffineMap>(getDimToLvl());
+  // Empty affine map indicates identity map
+  if (!map)
+    map = AffineMap::getMultiDimIdentityMap(getLvlTypes().size(), getContext());
+  printer << "<{ map = ";
+  printSymbols(map, printer);
+  printer << '(';
+  printDimensions(map, printer, getDimSlices());
+  printer << ") -> (";
+  printLevels(map, printer, getLvlTypes());
+  printer << ')';
   // Print remaining members only for non-default values.
-  if (!isIdentity())
-    printer << ", dimToLvl = affine_map<" << getDimToLvl() << ">";
   if (getPosWidth())
     printer << ", posWidth = " << getPosWidth();
   if (getCrdWidth())
     printer << ", crdWidth = " << getCrdWidth();
-  if (!getDimSlices().empty()) {
-    printer << ", dimSlices = [ ";
-    llvm::interleaveComma(getDimSlices(), printer,
-                          [&](SparseTensorDimSliceAttr attr) {
-                            // Calls SparseTensorDimSliceAttr::print directly to
-                            // skip mnemonic.
-                            attr.print(printer);
-                          });
-    printer << " ]";
-  }
-
   printer << " }>";
 }

+void SparseTensorEncodingAttr::printSymbols(AffineMap &map,
+                                            AsmPrinter &printer) const {
+  if (map.getNumSymbols() == 0)
+    return;
+  printer << '[';
+  for (unsigned i = 0, n = map.getNumSymbols() - 1; i < n; i++)
+    printer << 's' << i << ", ";
+  if (map.getNumSymbols() >= 1)
+    printer << 's' << map.getNumSymbols() - 1;
+  printer << ']';
+}
+
+void SparseTensorEncodingAttr::printDimensions(
+    AffineMap &map, AsmPrinter &printer,
+    ArrayRef<SparseTensorDimSliceAttr> dimSlices) const {
+  if (!dimSlices.empty()) {
+    for (unsigned i = 0, n = map.getNumDims() - 1; i < n; i++)
+      printer << 'd' << i << " : " << dimSlices[i] << ", ";
+    if (map.getNumDims() >= 1) {
+      printer << 'd' << map.getNumDims() - 1 << " : "
+              << dimSlices[map.getNumDims() - 1];
+    }
+  } else {
+    for (unsigned i = 0, n = map.getNumDims() - 1; i < n; i++)
+      printer << 'd' << i << ", ";
+    if (map.getNumDims() >= 1)
+      printer << 'd' << map.getNumDims() - 1;
+  }
+}
+
+void SparseTensorEncodingAttr::printLevels(
+    AffineMap &map, AsmPrinter &printer,
+    ArrayRef<DimLevelType> lvlTypes) const {
+  for (unsigned i = 0, n = map.getNumResults() - 1; i < n; i++) {
+    map.getResult(i).print(printer.getStream());
+    printer << " : " << toMLIRString(lvlTypes[i]) << ", ";
+  }
+  if (map.getNumResults() >= 1) {
+    auto lastIndex = map.getNumResults() - 1;
+    map.getResult(lastIndex).print(printer.getStream());
+    printer << " : " << toMLIRString(lvlTypes[lastIndex]);
+  }
+}
+
 LogicalResult
 SparseTensorEncodingAttr::verify(function_ref<InFlightDiagnostic()> emitError,
                                  ArrayRef<DimLevelType> lvlTypes,
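Note: with the printer above, an encoding whose dimToLvl is the identity simply prints the multi-dim identity map, e.g. (an illustrative sketch, matching the roundtrip tests further below):

    #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth = 64, crdWidth = 64 }>

and a map with symbols (the ELL case) prints its symbol list first via printSymbols, as in `map = [s0](d0, d1) -> (...)`.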
@@ -472,8 +472,17 @@ public:
     llvm::raw_svector_ostream nameOstream(nameBuffer);
     nameOstream << kInsertFuncNamePrefix;
     const Level lvlRank = stt.getLvlRank();
-    for (Level l = 0; l < lvlRank; l++)
-      nameOstream << toMLIRString(stt.getLvlType(l)) << "_";
+    for (Level l = 0; l < lvlRank; l++) {
+      std::string lvlType = toMLIRString(stt.getLvlType(l));
+      // Replace/remove punctuations in level properties.
+      std::replace_if(
+          lvlType.begin(), lvlType.end(),
+          [](char c) { return c == '(' || c == ','; }, '_');
+      lvlType.erase(std::remove_if(lvlType.begin(), lvlType.end(),
+                                   [](char c) { return c == ')' || c == ' '; }),
+                    lvlType.end());
+      nameOstream << lvlType << "_";
+    }
     // Static dim sizes are used in the generated code while dynamic sizes are
     // loaded from the dimSizes buffer. This is the reason for adding the shape
     // to the function name.
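Since toMLIRString now returns strings such as "compressed(nonunique)", the mangling above keeps the generated function names identifier-safe. A standalone C++ sketch of the same transformation (hypothetical driver, not part of this patch):

#include <algorithm>
#include <iostream>
#include <string>

int main() {
  // Same replace/remove steps as in the codegen change above.
  std::string lvlType = "compressed(nonunique, nonordered)";
  // '(' and ',' become '_'.
  std::replace_if(
      lvlType.begin(), lvlType.end(),
      [](char c) { return c == '(' || c == ','; }, '_');
  // ')' and spaces are dropped entirely.
  lvlType.erase(std::remove_if(lvlType.begin(), lvlType.end(),
                               [](char c) { return c == ')' || c == ' '; }),
                lvlType.end());
  std::cout << lvlType << "\n"; // prints "compressed_nonunique_nonordered"
  return 0;
}

This matches the renamed helpers checked in the tests below, e.g. @_insert_dense_compressed_nonordered_8_8_f64_0_0.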
@@ -507,7 +507,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
   return %1 : tensor<8x8xf64, #CSR>
 }

-// CHECK-LABEL: func.func private @_insert_dense_compressed_no_8_8_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_dense_compressed_nonordered_8_8_f64_0_0(
 // CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 // CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 // CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -533,7 +533,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
 // CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
 // CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
-// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_no_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_nonordered_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK: memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
 // CHECK: memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
 // CHECK: scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -611,7 +611,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
   return %1 : tensor<128xf64, #SparseVector>
 }

-// CHECK-LABEL: func.func private @_insert_compressed_nu_singleton_5_6_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_compressed_nonunique_singleton_5_6_f64_0_0(
 // CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 // CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 // CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -627,7 +627,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
 // CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
 // CHECK-SAME: %[[A4:.*4]]: index,
 // CHECK-SAME: %[[A5:.*5]]: f64)
-// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nu_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
+// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nonunique_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
 // CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
 func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
   %0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -split-input-file | mlir-opt | FileCheck %s

 // CHECK-LABEL: func private @sparse_1d_tensor(
-// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>)
+// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)
 func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)

 // -----
@@ -13,7 +13,7 @@ func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map
 }>

 // CHECK-LABEL: func private @sparse_csr(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], posWidth = 64, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth = 64, crdWidth = 64 }>>)
 func.func private @sparse_csr(tensor<?x?xf32, #CSR>)

 // -----
@@ -23,7 +23,7 @@ func.func private @sparse_csr(tensor<?x?xf32, #CSR>)
 }>

 // CHECK-LABEL: func private @CSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>>
 func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
   return
 }
@@ -37,7 +37,7 @@ func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
 }>

 // CHECK-LABEL: func private @sparse_csc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }>>)
 func.func private @sparse_csc(tensor<?x?xf32, #CSC>)

 // -----
@@ -49,7 +49,7 @@ func.func private @sparse_csc(tensor<?x?xf32, #CSC>)
 }>

 // CHECK-LABEL: func private @sparse_dcsc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : compressed, d0 : compressed), crdWidth = 64 }>>)
 func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)

 // -----
@@ -59,7 +59,7 @@ func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)
 }>

 // CHECK-LABEL: func private @sparse_coo(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu_no", "singleton_no" ] }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered)) }>>)
 func.func private @sparse_coo(tensor<?x?xf32, #COO>)

 // -----
@@ -69,7 +69,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
 }>

 // CHECK-LABEL: func private @sparse_bcoo(
-// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "loose_compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton) }>>)
 func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)

 // -----
@@ -79,7 +79,7 @@ func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
 }>

 // CHECK-LABEL: func private @sparse_sorted_coo(
-// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }>>)
 func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)

 // -----
@@ -94,7 +94,7 @@ func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
 }>

 // CHECK-LABEL: func private @sparse_bcsr(
-// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)

@@ -105,7 +105,7 @@ func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
 }>

 // CHECK-LABEL: func private @sparse_ell(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], dimToLvl = affine_map<(d0, d1)[s0] -> (d0 * (s0 * 4), d0, d1)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed) }>>
 func.func private @sparse_ell(tensor<?x?xf64, #ELL>)

 // -----
@@ -115,7 +115,7 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
 }>

 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

 // -----
@@ -125,7 +125,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 }>

 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, ?, 1), (?, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

 // -----
@@ -138,7 +138,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 }>

 // CHECK-LABEL: func private @sparse_2_out_of_4(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed24" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : block2_4) }>>
 func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)

 // -----
@@ -153,7 +153,7 @@ func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)
 }>

 // CHECK-LABEL: func private @BCSR(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
   return
 }
@@ -174,7 +174,7 @@ func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
 }>

 // CHECK-LABEL: func private @BCSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
   return
 }
@@ -190,7 +190,7 @@ func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
 }>

 // CHECK-LABEL: func private @NV_24(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed24" ], dimToLvl = affine_map<(d0, d1) -> (d0, d1 floordiv 4, d1 mod 4)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 floordiv 4 : dense, d1 mod 4 : block2_4) }>>
 func.func private @NV_24(%arg0: tensor<?x?xf64, #NV_24>) {
   return
 }
@@ -16,7 +16,7 @@
 // CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -53,7 +53,7 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
 // CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -99,7 +99,7 @@ func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<10
 // CHECK-ROUND: return %[[E]] : tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @dynamic_sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -142,7 +142,7 @@ func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>) -> tensor<
 // CHECK-ROUND: return %[[C]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @dynamic_sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -4,7 +4,7 @@
 #SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>

 // CHECK: func.func @sparse_reshape(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C25:.*]] = arith.constant 25 : index
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
@@ -21,7 +21,7 @@ def testEncodingAttr1D():
         "  crdWidth = 32"
         "}>"
     )
-    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 16, crdWidth = 32 }>
+    # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 16, crdWidth = 32 }>
     print(parsed)

     casted = st.EncodingAttr(parsed)
@@ -38,7 +38,7 @@ def testEncodingAttr1D():
     print(f"crd_width: {casted.crd_width}")

     created = st.EncodingAttr.get(casted.lvl_types, None, 0, 0)
-    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
+    # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
     print(created)
     # CHECK: created_equal: False
     print(f"created_equal: {created == casted}")
@@ -61,7 +61,7 @@ def testEncodingAttr2D():
         "  crdWidth = 32"
         "}>"
     )
-    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
+    # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
     print(parsed)

     casted = st.EncodingAttr(parsed)
@@ -77,10 +77,8 @@ def testEncodingAttr2D():
     # CHECK: crd_width: 32
     print(f"crd_width: {casted.crd_width}")

-    created = st.EncodingAttr.get(
-        casted.lvl_types, casted.dim_to_lvl, 8, 32
-    )
-    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
+    created = st.EncodingAttr.get(casted.lvl_types, casted.dim_to_lvl, 8, 32)
+    # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
     print(created)
     # CHECK: created_equal: True
     print(f"created_equal: {created == casted}")
@@ -100,8 +98,8 @@ def testEncodingAttrOnTensorType():
         )
     )
     tt = RankedTensorType.get((1024,), F32Type.get(), encoding=encoding)
-    # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 64, crdWidth = 32 }>>
+    # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 64, crdWidth = 32 }>>
     print(tt)
-    # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 64, crdWidth = 32 }>
+    # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 64, crdWidth = 32 }>
     print(tt.encoding)
     assert tt.encoding == encoding