Lifted the restriction that prevented implicit dynamic-to-static dimension type casts in TOSA ops.
Reviewed By: jpienaar, gflegar
Differential Revision: https://reviews.llvm.org/D156714
parent ea90e289d9
commit bd077e98e4
@@ -69,17 +69,17 @@ Given the shapes of two ranked input operands, the result's shape is inferred by

```python
InferShape(shape0, shape1):

    # Equalize ranks
    rank = max(GetRank(shape0), GetRank(shape1))
    ExpandRank(shape0, rank)
    ExpandRank(shape1, rank)

    # Infer shape
    inferredShape = []
    for (dim0, dim1) in zip(shape0, shape1):
        inferredDim = InferDim(dim0, dim1)
        inferredShape.append(inferredDim)
    return inferredShape
```
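As a worked example (assuming the conventional broadcast behavior of the `InferDim` helper, in which a 1-sized dimension stretches to match its counterpart):

```python
# shape0 = [3, 1], shape1 = [4]
# Rank equalization expands shape1 to [1, 4].
# InferDim(3, 1) = 3 and InferDim(1, 4) = 4, hence:
InferShape([3, 1], [4])  # inferred shape: [3, 4]
```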
The result shape for an operation with an arbitrary number of input operands is then inferred by discarding unranked operands, applying shape inference on the first ranked operand pair, and updating the inferred shape with each additional ranked operand. If the operation has no ranked operands, the result shape cannot be inferred. If the operation has exactly one ranked operand, its shape is directly provided as the inferred result shape. Formally:
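The formal definition referenced here is elided from the diff context. The following is a minimal sketch consistent with the description above, reusing `InferShape` from the earlier block; the `GetShape` and `IsRanked` helpers are assumptions for illustration:

```python
InferResultShape(operands):

    # Discard unranked operands
    shapes = [GetShape(operand) for operand in operands if IsRanked(operand)]

    # With no ranked operands, the result shape cannot be inferred
    if len(shapes) == 0:
        return None

    # With exactly one ranked operand, its shape is the inferred result shape;
    # otherwise, fold in each additional ranked operand pairwise
    inferredShape = shapes[0]
    for shape in shapes[1:]:
        inferredShape = InferShape(inferredShape, shape)
    return inferredShape
```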
@@ -111,7 +111,7 @@ Once a rank match is guaranteed, each dimension of the inferred shape is compare

| `inferredDim` | `actualDim` | Verification outcome |
| ------------- | ----------- | -------------------- |
| ? | ? | **OK** |
-| ? | static | **Error** <br> An inferred dimension being dynamic indicates that its size cannot be inferred at compile time from its input operands. The presence of a static dimension in the actual result is counterintuitive and is therefore not allowed. |
+| ? | static | **OK** <br> A failure to guarantee that the runtime dimension size of the result equals `actualDim` causes undefined behavior. While unusual, this implicit dynamic-to-static cast is convenient in certain scenarios, such as an intermediate state of a shape inference pass. Ultimately, a static dimension in the result implies that all input dimension sizes are also known at compile time, so it is preferable to make them static as well. |
| static | ? | **OK** <br> The actual result dimension may be dynamic even when a static size can be inferred at compile time. The programmer may choose to relax the specificity of the result dimension for forward compatibility of the result type. |
| static | static | **OK if equal** <br> When both the inferred and actual dimensions are static, they must be set to the same size. |
@@ -134,7 +134,6 @@ Verify(op):

    # Verify
    for (inferredDim, actualDim) in zip(inferredShape, actualShape):
-        ERROR_IF(IsDynamic(inferredDim) and IsStatic(actualDim))
        ERROR_IF(IsStatic(actualDim) and inferredDim != actualDim)
```
@@ -195,3 +194,5 @@ The following are incorrect uses of broadcastable ops:
// tensor<4xi32>. Broadcast semantics are not applicable for results.
%result = "test.broadcastable"(%arg0, %arg1) : (tensor<1xi32>, tensor<1xi32>) -> tensor<4xi32>
```
@@ -195,17 +195,10 @@ static std::tuple<bool, bool> hasTensorOrVectorType(iterator_range types) {

static bool isCompatibleInferredReturnShape(ArrayRef<int64_t> inferred,
                                            ArrayRef<int64_t> existing) {
+  // If both inferred and existing dimensions are static, they must be equal.
  auto isCompatible = [](int64_t inferredDim, int64_t existingDim) {
-    // The following criterion is used to determine the validity of an existing
-    // dimension:
-    //
-    //  inferredDim  existingDim  Behavior
-    //  -----------  -----------  --------
-    //  dynamic      dynamic      OK
-    //  dynamic      static       Error
-    //  static       dynamic      OK
-    //  static       static       OK if equal
-    return ShapedType::isDynamic(existingDim) || inferredDim == existingDim;
+    return ShapedType::isDynamic(existingDim) ||
+           ShapedType::isDynamic(inferredDim) || inferredDim == existingDim;
  };
  if (inferred.size() != existing.size())
    return false;
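To make the relaxed predicate concrete, here is a small self-contained Python sketch of the same rule; `K_DYNAMIC` is an illustrative stand-in for `ShapedType::kDynamic`, not an MLIR API:

```python
K_DYNAMIC = object()  # stand-in sentinel for a dynamic dimension size

def is_compatible(inferred_dim, existing_dim):
    # Mirrors the relaxed rule: a dynamic dimension on either side is
    # compatible; two static dimensions must be equal.
    return (existing_dim is K_DYNAMIC or inferred_dim is K_DYNAMIC
            or inferred_dim == existing_dim)

assert is_compatible(K_DYNAMIC, K_DYNAMIC)  # ? -> ?       OK
assert is_compatible(K_DYNAMIC, 5)          # ? -> static  OK (newly allowed)
assert is_compatible(5, K_DYNAMIC)          # static -> ?  OK
assert is_compatible(5, 5)                  # static match OK
assert not is_compatible(4, 5)              # static mismatch: error
```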
@@ -20,24 +20,45 @@ func.func @test_abs_scalar(%arg0: tensor<f32>) -> tensor<f32> {
// -----

// CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
-// CHECK-LABEL: @test_abs_1d_cast_result
+// CHECK-LABEL: @test_abs_1d_cast_static_to_dynamic
// CHECK-SAME: ([[ARG0:%[0-9a-zA-Z_]*]]
-func.func @test_abs_1d_cast_result(%arg0: tensor<5xf32>) -> tensor<?xf32> {
+func.func @test_abs_1d_cast_static_to_dynamic(%arg0: tensor<5xf32>) -> tensor<?xf32> {
  // CHECK: [[EMPTY:%.+]] = tensor.empty() : tensor<5xf32>
  // CHECK: [[RESULT:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins([[ARG0]] : tensor<5xf32>) outs([[EMPTY]] : tensor<5xf32>) {
  // CHECK: ^bb0([[IN0:%.+]]: f32, [[OUT0:%.+]]: f32):
  // CHECK: [[ABS:%.+]] = math.absf [[IN0]] : f32
  // CHECK: linalg.yield [[ABS]] : f32
  // CHECK: } -> tensor<5xf32>
-  // CHECK: [[CAST_RESULT:%.+]] = tensor.cast [[RESULT]] : tensor<5xf32> to tensor<?xf32>
  %0 = "tosa.abs"(%arg0) : (tensor<5xf32>) -> tensor<?xf32>

+  // CHECK: [[CAST_RESULT:%.+]] = tensor.cast [[RESULT]] : tensor<5xf32> to tensor<?xf32>
  // CHECK: return [[CAST_RESULT]] : tensor<?xf32>
  return %0 : tensor<?xf32>
}

+// -----
+
+// CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
+// CHECK-LABEL: @test_abs_1d_cast_dynamic_to_static
+// CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]
+func.func @test_abs_1d_cast_dynamic_to_static(%arg0: tensor<?xf32>) -> tensor<5xf32> {
+  // CHECK: %[[ZERO:.*]] = arith.constant 0 : index
+  // CHECK: %[[DIM_SIZE:.*]] = tensor.dim %[[ARG0]], %[[ZERO]] : tensor<?xf32>
+  // CHECK: %[[EMPTY:.*]] = tensor.empty(%[[DIM_SIZE]]) : tensor<?xf32>
+  // CHECK: %[[RESULT:.*]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins(%[[ARG0]] : tensor<?xf32>) outs(%[[EMPTY]] : tensor<?xf32>) {
+  // CHECK: ^bb0(%[[VAL_0:.*]]: f32, %[[VAL_1:.*]]: f32):
+  // CHECK: %[[VAL_2:.*]] = math.absf %[[VAL_0]] : f32
+  // CHECK: linalg.yield %[[VAL_2]] : f32
+  // CHECK: } -> tensor<?xf32>
+  // CHECK: %[[CAST_RESULT:.*]] = tensor.cast %[[RESULT]] : tensor<?xf32> to tensor<5xf32>
+  %0 = "tosa.abs"(%arg0) : (tensor<?xf32>) -> tensor<5xf32>
+
+  // CHECK: return %[[CAST_RESULT]] : tensor<5xf32>
+  return %0 : tensor<5xf32>
+}
+
// -----

// CHECK: #[[$MAP0:.*]] = affine_map<(d0) -> (d0)>
// CHECK-LABEL: @test_abs_1d_dynamic
// CHECK-SAME: ([[ARG0:%[0-9a-zA-Z_]*]]
@@ -111,9 +111,10 @@ func.func @broadcast_tensor_tensor_tensor(tensor<4x3x2xi32>, tensor<?xi32>) -> t

// -----

-// Error for inferred dynamic dimension but existing static dimensions
+// It is alright to have an implicit dynamic-to-static cast in a dimension size
+// as long as the runtime result size is consistent with the result tensor's
+// static dimension.
func.func @broadcast_tensor_tensor_tensor(%arg0: tensor<?xi32>, %arg1: tensor<?xi32>) -> tensor<2xi32> {
-  // expected-error @+1 {{op result type '2' not broadcast compatible with broadcasted operands's shapes '?'}}
  %0 = "test.broadcastable"(%arg0, %arg1) : (tensor<?xi32>, tensor<?xi32>) -> tensor<2xi32>
  return %0 : tensor<2xi32>
}