[mlir][sparse] code formatting (NFC) (#74779)

author Aart Bik, 2023-12-07 15:46:24 -08:00 (committed by GitHub)
parent 42bba97fc2
commit 7003e255d3
2 changed files with 13 additions and 15 deletions
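
This is a formatting-only change (NFC): each removed/added pair in the hunks below carries the same code and differs only in whitespace, re-indenting the continuation lines of the conv2d test function signatures. As a rough sketch of the kind of alignment such a cleanup produces, assuming the common MLIR style of lining the trailing arguments up under the first parameter (the exact columns used by the patch are not reproduced here):

// Illustration only: the alignment style shown is an assumption, not copied from the patch.
func.func @conv2d(%input:  tensor<8x8xi32>,
                  %filter: tensor<3x3xi32>,
                  %output: tensor<6x6xi32>) -> tensor<6x6xi32> {
  %0 = linalg.conv_2d
    ins(%input, %filter : tensor<8x8xi32>, tensor<3x3xi32>)
    outs(%output : tensor<6x6xi32>) -> tensor<6x6xi32>
  return %0 : tensor<6x6xi32>
}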

File 1 of 2:

@@ -41,8 +41,8 @@
module {
func.func @conv2d(%input: tensor<8x8xi32>,
-%filter: tensor<3x3xi32>,
-%output: tensor<6x6xi32>) -> tensor<6x6xi32> {
+%filter: tensor<3x3xi32>,
+%output: tensor<6x6xi32>) -> tensor<6x6xi32> {
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
outs (%output: tensor<6x6xi32>) -> tensor<6x6xi32>
@@ -50,7 +50,7 @@ module {
}
func.func @conv2d_all_sparse_DCSR(%input: tensor<8x8xi32, #DCSR>,
-%filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
+%filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
%s = tensor.empty() : tensor<6x6xi32, #DCSR>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32, #DCSR>)
@@ -59,7 +59,7 @@ module {
}
func.func @conv2d_all_sparse_CSR(%input: tensor<8x8xi32, #CSR>,
-%filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
+%filter: tensor<3x3xi32, #CSR>) -> tensor<6x6xi32, #CSR> {
%s = tensor.empty() : tensor<6x6xi32, #CSR>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32, #CSR>)
@@ -68,7 +68,7 @@ module {
}
func.func @conv2d_all_sparse_CD(%input: tensor<8x8xi32, #CDR>,
-%filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
+%filter: tensor<3x3xi32, #CDR>) -> tensor<6x6xi32, #CDR> {
%s = tensor.empty() : tensor<6x6xi32, #CDR>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32, #CDR>)
@@ -77,7 +77,7 @@ module {
}
func.func @conv2d_all_sparse_CSC(%input: tensor<8x8xi32, #CSC>,
-%filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
+%filter: tensor<3x3xi32, #CSC>) -> tensor<6x6xi32, #CSC> {
%s = tensor.empty() : tensor<6x6xi32, #CSC>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32, #CSC>)
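
The hunks above show only fragments of each sparse variant. Assembled, the DCSR case reads roughly as follows; the #DCSR encoding is defined near the top of the test file, outside the hunks shown, so the definition here is an assumption based on the usual doubly compressed sparse row setup:

// Assumed encoding (defined outside the hunks above): doubly compressed sparse rows.
#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>

func.func @conv2d_all_sparse_DCSR(%input:  tensor<8x8xi32, #DCSR>,
                                  %filter: tensor<3x3xi32, #DCSR>) -> tensor<6x6xi32, #DCSR> {
  // All-sparse output: start from an empty sparse tensor and let the sparsifier
  // materialize only the entries produced by the convolution.
  %s = tensor.empty() : tensor<6x6xi32, #DCSR>
  %0 = linalg.conv_2d
    ins(%input, %filter : tensor<8x8xi32, #DCSR>, tensor<3x3xi32, #DCSR>)
    outs(%s : tensor<6x6xi32, #DCSR>) -> tensor<6x6xi32, #DCSR>
  return %0 : tensor<6x6xi32, #DCSR>
}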

File 2 of 2:

@@ -46,8 +46,8 @@
module {
func.func @conv2d(%input: tensor<8x8xi32>,
-%filter: tensor<3x3xi32>,
-%output: tensor<6x6xi32>) -> tensor<6x6xi32> {
+%filter: tensor<3x3xi32>,
+%output: tensor<6x6xi32>) -> tensor<6x6xi32> {
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
outs (%output: tensor<6x6xi32>) -> tensor<6x6xi32>
@@ -70,7 +70,7 @@ module {
}
func.func @conv2d_sparse_out(%input: tensor<8x8xi32>,
-%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
+%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
%s = tensor.empty() : tensor<6x6xi32, #DCSR>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32>, tensor<3x3xi32>)
@@ -79,7 +79,7 @@ module {
}
func.func @conv2d_all_sparse_DCSR(%input: tensor<8x8xi32, #DCSR>,
-%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
+%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
%s = tensor.empty() : tensor<6x6xi32, #DCSR>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #DCSR>, tensor<3x3xi32>)
@@ -88,7 +88,7 @@ module {
}
func.func @conv2d_all_sparse_CSR(%input: tensor<8x8xi32, #CSR>,
-%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSR> {
+%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSR> {
%s = tensor.empty() : tensor<6x6xi32, #CSR>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #CSR>, tensor<3x3xi32>)
@@ -97,7 +97,7 @@ module {
}
func.func @conv2d_all_sparse_CD(%input: tensor<8x8xi32, #CDR>,
-%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CDR> {
+%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CDR> {
%s = tensor.empty() : tensor<6x6xi32, #CDR>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #CDR>, tensor<3x3xi32>)
@@ -106,7 +106,7 @@ module {
}
func.func @conv2d_all_sparse_CSC(%input: tensor<8x8xi32, #CSC>,
-%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSC> {
+%filter: tensor<3x3xi32>) -> tensor<6x6xi32, #CSC> {
%s = tensor.empty() : tensor<6x6xi32, #CSC>
%0 = linalg.conv_2d
ins (%input, %filter: tensor<8x8xi32, #CSC>, tensor<3x3xi32>)
@@ -125,7 +125,6 @@ module {
[ -1, 0, 1 ]
]> : tensor<3x3xi32>
%input = arith.constant dense<[
[ 1, 2, 3, 4, 0, 6, 7, 8 ],
[ 2, 2, 4, 4, 0, 0, 6, 8 ],
@@ -270,7 +269,6 @@ module {
: tensor<6x6xi32>, vector<6x6xi32>
vector.print %v : vector<6x6xi32>
// Release the resources.
bufferization.dealloc_tensor %sparse_input_DCSR : tensor<8x8xi32, #DCSR>
bufferization.dealloc_tensor %sparse_input_CSR : tensor<8x8xi32, #CSR>