header file change

Signed-off-by: wangchuanxia <wangchuanxia@huawei.com>
wangchuanxia 2023-12-22 15:12:11 +08:00
parent f2f5f91422
commit 26114b906b
5 changed files with 580 additions and 529 deletions

View File

@ -33,7 +33,6 @@ ohos_ndk_library("libneural_network_runtime_ndk") {
system_capability = "SystemCapability.AI.NeuralNetworkRuntime"
system_capability_headers = [
"neural_network_runtime.h",
"neural_network_runtime_compat.h",
"neural_network_runtime_type.h",
]
}
@ -43,7 +42,6 @@ ohos_ndk_headers("libneural_network_runtime_header") {
sources = [
"neural_network_core.h",
"neural_network_runtime.h",
"neural_network_runtime_compat.h",
"neural_network_runtime_type.h",
]
}

View File

@ -26,13 +26,14 @@
/**
* @file neural_network_core.h
*
* @brief Defines the Neural Network Core APIs. The AI inference framework uses the Native APIs provided by
* Neural Network Core to compile models and perform inference and computing on acceleration hardware.
*
* Note: Currently, the APIs of Neural Network Core do not support multi-thread calling. \n
*
* include "neural_network_runtime/neural_network_core.h"
* @library libneural_network_core.so
* @kit Neural Network Runtime Kit
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 11
* @version 1.0
@ -50,19 +51,19 @@ extern "C" {
/**
* @brief Creates a compilation instance of the {@link OH_NNCompilation} type.
*
* After the OH_NNModel module completes model construction, APIs provided by the OH_NNCompilation module pass the
* model to the underlying device for compilation. This method creates a {@link OH_NNCompilation} instance
* based on the passed {@link OH_NNModel} instance. The {@link OH_NNCompilation_SetDevice} method is called
* to set the device to compile on, and {@link OH_NNCompilation_Build} is then called to complete compilation.\n
*
* In addition to computing device selection, the OH_NNCompilation module supports features such as model caching,
* performance preference, priority setting, and float16 computing, which can be implemented by the following methods:\n
* {@link OH_NNCompilation_SetCache}\n
* {@link OH_NNCompilation_SetPerformanceMode}\n
* {@link OH_NNCompilation_SetPriority}\n
* {@link OH_NNCompilation_EnableFloat16}\n
*
* After {@link OH_NNCompilation_Build} is called, the {@link OH_NNModel} instance can be released.\n
*
* @param model Pointer to the {@link OH_NNModel} instance.
* @return Pointer to a {@link OH_NNCompilation} instance, or NULL if it fails to create.
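
A minimal usage sketch of the flow this comment describes, assuming a previously constructed OH_NNModel *model and a deviceID obtained from OH_NNDevice_GetAllDevicesID (error handling abbreviated):

OH_NNCompilation *compilation = OH_NNCompilation_Construct(model);
if (compilation != NULL) {
    OH_NN_ReturnCode ret = OH_NNCompilation_SetDevice(compilation, deviceID);
    if (ret == OH_NN_SUCCESS) {
        ret = OH_NNCompilation_Build(compilation);
    }
    OH_NNModel_Destroy(&model);  /* the model may be released once Build has run */
}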
@ -110,10 +111,12 @@ OH_NNCompilation *OH_NNCompilation_ConstructWithOfflineModelBuffer(const void *m
/**
* @brief Creates an empty compilation instance for restoration from cache later.
*
* See {@link OH_NNCompilation_SetCache} for the description of cache.\n
*
* The restoration time from the cache is less than compilation with {@link OH_NNModel}.\n
*
* You should call {@link OH_NNCompilation_SetCache} or {@link OH_NNCompilation_ImportCacheFromBuffer} first,
* and then call {@link OH_NNCompilation_Build} to complete the restoration.\n
*
* @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create.
* @since 11
@ -124,15 +127,18 @@ OH_NNCompilation *OH_NNCompilation_ConstructForCache();
/**
* @brief Exports the cache to a given buffer.
*
* See {@link OH_NNCompilation_SetCache} for the description of cache.\n
*
* Note that the cache is the build result of {@link OH_NNCompilation_Build}, so this method must be
* called after {@link OH_NNCompilation_Build}.\n
*
* @param compilation Pointer to the {@link OH_NNCompilation} instance.
* @param buffer Pointer to the given buffer.
* @param length Buffer length.
* @param modelSize Byte size of the model cache.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
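
A hedged sketch of the export step, assuming compilation has already been built and cacheBuffer is a caller-owned buffer of bufferLength bytes (both names are illustrative):

size_t modelSize = 0;
OH_NN_ReturnCode ret = OH_NNCompilation_ExportCacheToBuffer(compilation, cacheBuffer, bufferLength, &modelSize);
if (ret == OH_NN_SUCCESS) {
    /* the first modelSize bytes of cacheBuffer now hold the exported model cache */
}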
@ -144,16 +150,20 @@ OH_NN_ReturnCode OH_NNCompilation_ExportCacheToBuffer(OH_NNCompilation *compilat
/**
* @brief Imports the cache from a given buffer.
*
* See {@link OH_NNCompilation_SetCache} for the description of cache.\n
*
* {@link OH_NNCompilation_Build} should be called to complete the restoration after
* {@link OH_NNCompilation_ImportCacheFromBuffer} is called.\n
*
* Note that <b>compilation</b> only saves the <b>buffer</b> pointer inside, instead of copying its data. You should not
* release <b>buffer</b> before <b>compilation</b> is destroyed.\n
*
* @param compilation Pointer to the {@link OH_NNCompilation} instance.
* @param buffer Pointer to the given buffer.
* @param modelSize Byte size of the model cache.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
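
A matching restoration sketch, assuming cacheBuffer still holds the modelSize bytes exported above; the buffer must stay alive until the compilation is destroyed, since only the pointer is saved:

OH_NNCompilation *restored = OH_NNCompilation_ConstructForCache();
if (restored != NULL) {
    OH_NN_ReturnCode ret = OH_NNCompilation_ImportCacheFromBuffer(restored, cacheBuffer, modelSize);
    if (ret == OH_NN_SUCCESS) {
        ret = OH_NNCompilation_Build(restored);  /* completes the restoration */
    }
}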
@ -332,15 +342,17 @@ void OH_NNCompilation_Destroy(OH_NNCompilation **compilation);
/**
* @brief Creates an {@link NN_TensorDesc} instance.
*
* The {@link NN_TensorDesc} describes various tensor attributes, such as name/data type/shape/format, etc.\n
*
* The following methods can be called to create a {@link NN_Tensor} instance based on the passed {@link NN_TensorDesc}
* instance:\n
* {@link OH_NNTensor_Create}\n
* {@link OH_NNTensor_CreateWithSize}\n
* {@link OH_NNTensor_CreateWithFd}\n
*
* Note that these methods will copy the {@link NN_TensorDesc} instance into {@link NN_Tensor}. Therefore you can create
* multiple {@link NN_Tensor} instances with the same {@link NN_TensorDesc} instance. And you should destroy the
* {@link NN_TensorDesc} instance by {@link OH_NNTensorDesc_Destroy} when it is no longer used.\n
*
* @return Pointer to a {@link NN_TensorDesc} instance, or NULL if it fails to create.
* @since 11
@ -367,15 +379,16 @@ OH_NN_ReturnCode OH_NNTensorDesc_Destroy(NN_TensorDesc **tensorDesc);
/**
* @brief Sets the name of a {@link NN_TensorDesc}.
*
* After the {@link NN_TensorDesc} instance is created, call this method to set the tensor name.
* The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>.\n
*
* If <b>tensorDesc</b> or <b>name</b> is a null pointer, this method will return an error code.\n
*
* @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
* @param name The name of the tensor that needs to be set.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -384,19 +397,20 @@ OH_NN_ReturnCode OH_NNTensorDesc_SetName(NN_TensorDesc *tensorDesc, const char *
/**
* @brief Gets the name of a {@link NN_TensorDesc}.
*
* Call this method to obtain the name of the specified {@link NN_TensorDesc} instance.
* The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>.\n
*
* If <b>tensorDesc</b> or <b>name</b> is a null pointer, this method will return an error code.
* As an output parameter, <b>*name</b> must be a null pointer, otherwise the method will return an error code.
* For example, you should define char* tensorName = NULL, and pass &tensorName as the argument of <b>name</b>.\n
*
* You do not need to release the memory of <b>name</b>. It will be released when <b>tensorDesc</b> is destroyed.\n
*
* @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
* @param name The returned name of the tensor.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
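
A short sketch of the name round trip described by the two comments above, including the output-parameter convention (the pointer passed to the getter must be initialized to NULL):

NN_TensorDesc *desc = OH_NNTensorDesc_Create();
OH_NN_ReturnCode ret = OH_NNTensorDesc_SetName(desc, "input_image");  /* illustrative name */
const char *tensorName = NULL;                     /* must be NULL before the call */
ret = OH_NNTensorDesc_GetName(desc, &tensorName);  /* points to memory owned by desc */
/* do not free tensorName; it is released together with desc */
OH_NNTensorDesc_Destroy(&desc);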
@ -546,19 +560,20 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetByteSize(const NN_TensorDesc *tensorDesc, si
/**
* @brief Creates a {@link NN_Tensor} instance from {@link NN_TensorDesc}.
*
* This method uses {@link OH_NNTensorDesc_GetByteSize} to calculate the byte size of tensor data and allocates
* shared memory on device for it. The device driver will get the tensor data directly by the "zero-copy" way.\n
*
* Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
* <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
*
* If the tensor shape is dynamic, this method will return an error code.\n
*
* <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
* by default.\n
*
* <b>tensorDesc</b> must be provided, and this method will return an error code if it is a null pointer.\n
*
* Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
*
* @param deviceID Device id. If it is 0, the first device in the current device list will be used by default.
* @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
@ -566,24 +581,25 @@ OH_NN_ReturnCode OH_NNTensorDesc_GetByteSize(const NN_TensorDesc *tensorDesc, si
* @since 11
* @version 1.0
*/
NN_Tensor *OH_NNTensor_Create(size_t deviceID, NN_TensorDesc *tensorDesc);
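
A hedged end-to-end sketch for this constructor, assuming a valid deviceID; the shape, data type, and format setters and values below are illustrative:

NN_TensorDesc *desc = OH_NNTensorDesc_Create();
int32_t shape[4] = {1, 224, 224, 3};                     /* a static shape */
OH_NNTensorDesc_SetShape(desc, shape, 4);
OH_NNTensorDesc_SetDataType(desc, OH_NN_FLOAT32);
OH_NNTensorDesc_SetFormat(desc, OH_NN_FORMAT_NHWC);
NN_Tensor *tensor = OH_NNTensor_Create(deviceID, desc);  /* copies desc internally */
OH_NNTensorDesc_Destroy(&desc);                          /* safe: the tensor keeps its own copy */
/* ... use the tensor ... */
OH_NNTensor_Destroy(&tensor);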
/**
* @brief Creates a {@link NN_Tensor} instance with specified size and {@link NN_TensorDesc}.
*
* This method uses <b>size</b> as the byte size of tensor data and allocates shared memory on device for it.
* The device driver will get the tensor data directly by the "zero-copy" way.\n
*
* Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
* <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
*
* <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
* by default.\n
*
* <b>tensorDesc</b> must be provided; if it is a null pointer, the method returns an error code.
* <b>size</b> must be no less than the byte size of tensorDesc. Otherwise, this method will return an error code.
* If the tensor shape is dynamic, the <b>size</b> will not be checked.\n
*
* Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
*
* @param deviceID Device id. If it is 0, the first device in the current device list will be used by default.
* @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
@ -592,37 +608,35 @@ NN_Tensor* OH_NNTensor_Create(size_t deviceID, NN_TensorDesc *tensorDesc);
* @since 11
* @version 1.0
*/
NN_Tensor *OH_NNTensor_CreateWithSize(size_t deviceID, NN_TensorDesc *tensorDesc, size_t size);
/**
* @brief Creates a {@link NN_Tensor} instance with specified file descriptor and {@link NN_TensorDesc}.
*
* This method reuses the shared memory corresponding to the file descriptor <b>fd</b> passed. It may come from another
* {@link NN_Tensor} instance. When you call the {@link OH_NNTensor_Destroy} method to release the tensor created by
* this method, the tensor data memory will not be released.\n
*
* Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
* <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
*
* <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
* by default.\n
*
* <b>tensorDesc</b> must be provided; if it is a null pointer, the method returns an error code.\n
*
* Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
*
* @param deviceID Device id. If it is 0, the first device in the current device list will be used by default.
* @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
* @param fd File descriptor of the shared memory to be reused.
* @param size Size of the shared memory to be reused.
* @param offset Offset of the shared memory to be reused.
* @return Pointer to a {@link NN_Tensor} instance, or NULL if it fails to create.
* @since 11
* @version 1.0
*/
NN_Tensor *OH_NNTensor_CreateWithFd(size_t deviceID,
NN_TensorDesc *tensorDesc,
int fd,
size_t size,
@ -631,14 +645,16 @@ NN_Tensor* OH_NNTensor_CreateWithFd(size_t deviceID,
/**
* @brief Releases a {@link NN_Tensor} instance.
*
* When the {@link NN_Tensor} instance is no longer used, this method needs to be called to release the instance.
* Otherwise, a memory leak will occur.\n
*
* If <b>tensor</b> or <b>*tensor</b> is a null pointer, this method will return an error code and does not execute the
* release.\n
*
* @param tensor Double pointer to the {@link NN_Tensor} instance.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -647,69 +663,79 @@ OH_NN_ReturnCode OH_NNTensor_Destroy(NN_Tensor **tensor);
/**
* @brief Gets the {@link NN_TensorDesc} instance of a {@link NN_Tensor}.
*
* Call this method to obtain the inner {@link NN_TensorDesc} instance pointer of the specified {@link NN_Tensor}
* instance. You can get various types of the tensor attributes such as name/format/data type/shape from the returned
* {@link NN_TensorDesc} instance.\n
*
* You should not destroy the returned {@link NN_TensorDesc} instance because it points to the inner instance of
* {@link NN_Tensor}. Otherwise, a memory corruption of double free will occur when {@link OH_NNTensor_Destroy}
* is called.\n
*
* If <b>tensor</b> is a null pointer, this method will return a null pointer.\n
*
* @param tensor Pointer to the {@link NN_Tensor} instance.
* @return Pointer to the {@link NN_TensorDesc} instance, or NULL if it fails to create.
* @since 11
* @version 1.0
*/
NN_TensorDesc *OH_NNTensor_GetTensorDesc(const NN_Tensor *tensor);
/**
* @brief Gets the data buffer of a {@link NN_Tensor}.
*
* You can read/write data from/to the tensor data buffer. The buffer is mapped from a shared memory on device,
* so the device driver will get the tensor data directly by this "zero-copy" way.\n
*
* Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be got by
* {@link OH_NNTensor_GetOffset} and the size can be got by {@link OH_NNTensor_GetSize}.\n
*
* If <b>tensor</b> is a null pointer, this method will return a null pointer.\n
*
* @param tensor Pointer to the {@link NN_Tensor} instance.
* @return Pointer to data buffer of the tensor, or NULL if it fails to create.
* @since 11
* @version 1.0
*/
void *OH_NNTensor_GetDataBuffer(const NN_Tensor *tensor);
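
A hedged sketch of filling input data through the mapped buffer, staying inside the [offset, size) segment the comment above describes (memcpy from <string.h>; inputData and inputLength are illustrative):

void *buffer = OH_NNTensor_GetDataBuffer(inputTensor);
size_t size = 0;
size_t offset = 0;
OH_NNTensor_GetSize(inputTensor, &size);
OH_NNTensor_GetOffset(inputTensor, &offset);
if (buffer != NULL && inputLength <= size - offset) {
    memcpy(buffer, inputData, inputLength);  /* write at most size - offset bytes */
}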
/**
* @brief Gets the file descriptor of the shared memory of a {@link NN_Tensor}.
*
* The file descriptor <b>fd</b> corresponds to the shared memory of the tensor data, and can be reused
* by another {@link NN_Tensor} through {@link OH_NNTensor_CreateWithFd}.\n
*
* If <b>tensor</b> or <b>fd</b> is a null pointer, this method will return an error code.\n
*
* @param tensor Pointer to the {@link NN_Tensor} instance.
* @param fd The returned file descriptor of the shared memory.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
OH_NN_ReturnCode OH_NNTensor_GetFd(const NN_Tensor *tensor, int *fd);
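
A sketch that reads back the shared-memory parameters of an existing tensor and reuses them through {@link OH_NNTensor_CreateWithFd}; newDesc is assumed to be a freshly created {@link NN_TensorDesc} not used by any other tensor:

int fd = -1;
size_t size = 0;
size_t offset = 0;
OH_NNTensor_GetFd(srcTensor, &fd);
OH_NNTensor_GetSize(srcTensor, &size);
OH_NNTensor_GetOffset(srcTensor, &offset);
NN_Tensor *shared = OH_NNTensor_CreateWithFd(deviceID, newDesc, fd, size, offset);
/* destroying `shared` later will not release the reused memory itself */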
/**
* @brief Gets the size of the shared memory of a {@link NN_Tensor}.
*
* The <b>size</b> corresponds to the shared memory of the tensor data, and can be reused by another {@link NN_Tensor}
* through {@link OH_NNTensor_CreateWithFd}.\n
*
* The <b>size</b> is the same as the argument <b>size</b> of {@link OH_NNTensor_CreateWithSize} and
* {@link OH_NNTensor_CreateWithFd}. But for a tensor created by {@link OH_NNTensor_Create},
* it equals the tensor byte size.\n
*
* Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be got by
* {@link OH_NNTensor_GetOffset} and the size can be got by {@link OH_NNTensor_GetSize}.\n
*
* If <b>tensor</b> or <b>size</b> is a null pointer, this method will return an error code.\n
*
* @param tensor Pointer to the {@link NN_Tensor} instance.
* @param size The returned size of tensor data.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -718,17 +744,19 @@ OH_NN_ReturnCode OH_NNTensor_GetSize(const NN_Tensor *tensor, size_t *size);
/**
* @brief Gets the data offset of a tensor.
*
* The <b>offset</b> corresponds to the shared memory of the tensor data, and can be reused by another {@link NN_Tensor}
* through {@link OH_NNTensor_CreateWithFd}.\n
*
* Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be got by
* {@link OH_NNTensor_GetOffset} and the size can be got by {@link OH_NNTensor_GetSize}.\n
*
* If <b>tensor</b> or <b>offset</b> is a null pointer, this method will return an error code.\n
*
* @param tensor Pointer to the {@link NN_Tensor} instance.
* @param offset The returned offset of tensor data.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -752,24 +780,31 @@ OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation);
/**
* @brief Obtains the dimension information about the output tensor.
*
* After {@link OH_NNExecutor_Run} is called to complete a single inference, call this method to obtain the specified
* output dimension information and number of dimensions. It is commonly used in dynamic shape input and output
* scenarios.\n
*
* If the <b>outputIndex</b> is greater than or equal to the output tensor number, this method will return an error code.
* The output tensor number can be got by {@link OH_NNExecutor_GetOutputCount}.\n
*
* As an output parameter, <b>*shape</b> must be a null pointer, otherwise the method will return an error code.
* For example, you should define int32_t* tensorShape = NULL, and pass &tensorShape as the argument of <b>shape</b>.\n
*
* You do not need to release the memory of <b>shape</b>. It will be released when <b>executor</b> is destroyed.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output Index value, which is in the same sequence of the data output when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that <b>outputIndices</b> is <b>{4, 6, 8}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When {@link OH_NNExecutor_GetOutputShape} is called to obtain dimension information about
* the output tensor, <b>outputIndices</b> is <b>{0, 1, 2}</b>.
* @param shape Pointer to the int32_t array. The value of each element in the array is the length of the output tensor
* in each dimension.
* @param shapeLength Pointer to the uint32_t type. The number of output dimensions is returned.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
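
A sketch of querying an output's real shape after inference, following the NULL-initialized output-parameter convention above (output index 0 assumed):

int32_t *tensorShape = NULL;  /* must be NULL before the call */
uint32_t shapeLength = 0;
OH_NN_ReturnCode ret = OH_NNExecutor_GetOutputShape(executor, 0, &tensorShape, &shapeLength);
if (ret == OH_NN_SUCCESS) {
    /* tensorShape[0..shapeLength-1] holds the dimensions; memory is owned by the executor */
}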
@ -825,8 +860,9 @@ OH_NN_ReturnCode OH_NNExecutor_GetOutputCount(const OH_NNExecutor *executor, siz
/**
* @brief Creates an input tensor descriptor with its index.
*
* The input tensor descriptor contains all attributes of the input tensor.
* If the <b>index</b> is greater than or equal to the input tensor number, this method will return an error code.
* The input tensor number can be got by {@link OH_NNExecutor_GetInputCount}.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param index Input tensor index.
@ -834,13 +870,14 @@ OH_NN_ReturnCode OH_NNExecutor_GetOutputCount(const OH_NNExecutor *executor, siz
* @since 11
* @version 1.0
*/
NN_TensorDesc *OH_NNExecutor_CreateInputTensorDesc(const OH_NNExecutor *executor, size_t index);
/**
* @brief Creates an output tensor descriptor with its index.
*
* The output tensor descriptor contains all attributes of the output tensor.
* If the <b>index</b> is greater than or equal to the output tensor number, this method will return an error code.
* The output tensor number can be got by {@link OH_NNExecutor_GetOutputCount}.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param index Output tensor index.
@ -848,31 +885,35 @@ NN_TensorDesc* OH_NNExecutor_CreateInputTensorDesc(const OH_NNExecutor *executor
* @since 11
* @version 1.0
*/
NN_TensorDesc *OH_NNExecutor_CreateOutputTensorDesc(const OH_NNExecutor *executor, size_t index);
/**
* @brief Gets the dimension ranges of an input tensor.
*
* The supported dimension ranges of an input tensor with dynamic shape may be different among various devices.
* You can call this method to get the dimension ranges of the input tensor supported by the device.
* <b>*minInputDims</b> contains the minimum dimensions of the input tensor, and <b>*maxInputDims</b> contains the
* maximum, e.g. if an input tensor has dynamic shape [-1, -1, -1, 3], its <b>*minInputDims</b> may be [1, 10, 10, 3]
* and <b>*maxInputDims</b> may be [100, 1024, 1024, 3] on the device.\n
*
* If the <b>index</b> is greater than or equal to the input tensor number, this method will return an error code.
* The input tensor number can be got by {@link OH_NNExecutor_GetInputCount}.\n
*
* As an output parameter, <b>*minInputDims</b> or <b>*maxInputDims</b> must be a null pointer, otherwise the method
* will return an error code. For example, you should define int32_t* minInDims = NULL, and pass &minInDims as the
* argument of <b>minInputDims</b>.\n
*
* You do not need to release the memory of <b>*minInputDims</b> or <b>*maxInputDims</b>.
* It will be released when <b>executor</b> is destroyed.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param index Input tensor index.
* @param minInputDims Returned pointer to an array contains the minimum dimensions of the input tensor.
* @param maxInputDims Returned pointer to an array contains the maximum dimensions of the input tensor.
* @param shapeLength Returned length of the shape of input tensor.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -940,20 +981,28 @@ OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor,
/**
* @brief Asynchronous execution of the model inference.
*
* Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize} or
* {@link OH_NNTensor_CreateWithFd}. The input tensors data obtained by {@link OH_NNTensor_GetDataBuffer} must then
* be filled. The executor performs the inference and fills the results into the output tensors data for you
* to read.\n
*
* In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape}, or
* you can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc}, and then read its real
* shape by {@link OH_NNTensorDesc_GetShape}.\n
*
* The method is non-blocking and will return immediately.\n
*
* The callback function handles are set by {@link OH_NNExecutor_SetOnRunDone}
* and {@link OH_NNExecutor_SetOnServiceDied}. The inference results and error code can be obtained through
* {@link NN_OnRunDone}, and the abnormal termination of the device driver service during asynchronous
* execution can be handled through {@link NN_OnServiceDied}.\n
*
* If the execution time reaches the <b>timeout</b>, the execution will be terminated
* with no outputs, and the <b>errCode</b> returned in the callback function {@link NN_OnRunDone} will be
* {@link OH_NN_TIMEOUT}.\n
*
* The <b>userData</b> is the asynchronous execution identifier and will be returned as the first parameter of the callback
* function. You can input any value you want as long as it can identify different asynchronous executions.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputTensor An array of input tensors {@link NN_Tensor}.
@ -962,8 +1011,9 @@ OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor,
* @param outputCount Number of output tensors.
* @param timeout Time limit (millisecond) of the asynchronous execution, e.g. 1000.
* @param userData Asynchronous execution identifier.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -973,22 +1023,25 @@ OH_NN_ReturnCode OH_NNExecutor_RunAsync(OH_NNExecutor *executor,
NN_Tensor *outputTensor[],
size_t outputCount,
int32_t timeout,
void *userData);
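
A hedged sketch of the asynchronous flow, assuming prepared inputTensors/outputTensors arrays; the callback signature below follows this editor's reading of {@link NN_OnRunDone} in neural_network_runtime_type.h and should be checked against that header:

static void OnRunDone(void *userData, OH_NN_ReturnCode errCode, void *outputs[], int32_t outputCount)
{
    /* userData identifies the run; errCode is OH_NN_TIMEOUT if the time limit was reached */
}

OH_NNExecutor_SetOnRunDone(executor, OnRunDone);
int32_t runId = 1;  /* any value that identifies this execution */
OH_NN_ReturnCode ret = OH_NNExecutor_RunAsync(executor, inputTensors, inputCount,
                                              outputTensors, outputCount,
                                              1000 /* timeout in ms */, &runId);
/* returns immediately; results arrive through OnRunDone */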
/**
* @brief Obtains the IDs of all connected devices.
*
* Each device has a unique and fixed ID. This method returns device IDs on the current device through the uint32_t
* array.\n
*
* Device IDs are returned through the size_t array. Each element of the array is the ID of a single device.\n
*
* The array memory is managed internally, so you do not need to care about it.
* The data pointer is valid until the next time this method is called.\n
*
* @param allDevicesID Pointer to the size_t array. The input <b>*allDevicesID</b> must be a null pointer.
* Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
* @param deviceCount Pointer of the uint32_t type, which is used to return the length of <b>*allDevicesID</b>.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
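
A short sketch of the device query, using the NULL-initialized output pointer the comment requires:

const size_t *allDevicesID = NULL;  /* must be NULL, otherwise OH_NN_INVALID_PARAMETER */
uint32_t deviceCount = 0;
OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDevicesID, &deviceCount);
if (ret == OH_NN_SUCCESS && deviceCount > 0) {
    size_t deviceID = allDevicesID[0];  /* array memory is managed internally; do not free */
}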
@ -997,18 +1050,19 @@ OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32
/**
* @brief Obtains the name of the specified device.
*
* <b>deviceID</b> specifies the device whose name will be obtained. The device ID needs to be obtained by calling
* {@link OH_NNDevice_GetAllDevicesID}.
* If it is 0, the first device in the current device list will be used by default.\n
*
* The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>. <b>*name</b> must be a null pointer.
* Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
* For example, you should define char* deviceName = NULL, and pass &deviceName as the argument of <b>name</b>.\n
*
* @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
* @param name The device name returned.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 9
* @version 1.0
*/
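
And the matching name lookup, again with a NULL-initialized output pointer:

const char *deviceName = NULL;  /* must be NULL before the call */
OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, &deviceName);
/* deviceName is a '\0'-terminated string owned by the runtime; do not free it */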

View File

@ -19,7 +19,6 @@
*
* @brief Provides APIs of Neural Network Runtime for accelerating model inference.
*
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 9
* @version 2.0
*/
@ -34,6 +33,8 @@
*
* include "neural_network_runtime/neural_network_runtime.h"
* @library libneural_network_runtime.so
* @kit Neural Network Runtime Kit
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 9
* @version 2.0
*/
@ -43,7 +44,6 @@
#include "neural_network_runtime_type.h"
#include "neural_network_core.h"
#include "neural_network_runtime_compat.h"
#ifdef __cplusplus
extern "C" {
@ -65,14 +65,15 @@ NN_QuantParam *OH_NNQuantParam_Create();
/**
* @brief Sets the scales of the {@link NN_QuantParam} instance.
*
* The parameter <b>quantCount</b> is the number of quantization parameters of a tensor, e.g. the quantCount is the
* channel count if the tensor is per-channel quantized.\n
*
* @param quantParams Pointer to the {@link NN_QuantParam} instance.
* @param scales An array of scales for all quantization parameters of the tensor.
* @param quantCount Number of quantization parameters of the tensor.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -81,14 +82,15 @@ OH_NN_ReturnCode OH_NNQuantParam_SetScales(NN_QuantParam *quantParams, const dou
/**
* @brief Sets the zero points of the {@link NN_QuantParam} instance.
*
* The parameter <b>quantCount</b> is the number of quantization parameters of a tensor, e.g. the quantCount is the
* channel count if the tensor is per-channel quantized.\n
*
* @param quantParams Pointer to the {@link NN_QuantParam} instance.
* @param zeroPoints An array of zero points for all quantization parameters of the tensor.
* @param quantCount Number of quantization parameters of the tensor.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
@ -97,14 +99,15 @@ OH_NN_ReturnCode OH_NNQuantParam_SetZeroPoints(NN_QuantParam *quantParams, const
/**
* @brief Sets the number bits of the {@link NN_QuantParam} instance.
*
* The parameter <b>quantCount</b> is the number of quantization parameters of a tensor, e.g. the quantCount is the
* channel count if the tensor is per-channel quantized.\n
*
* @param quantParams Pointer to the {@link NN_QuantParam} instance.
* @param numBits An array of number bits for all quantization parameters of the tensor.
* @param quantCount Number of quantization parameters of the tensor.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @since 11
* @version 1.0
*/
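
A hedged sketch combining the three setters above for a per-channel quantized tensor with two channels; the numeric values are illustrative:

NN_QuantParam *quantParams = OH_NNQuantParam_Create();
double scales[2] = {0.1, 0.2};   /* one scale per channel */
int32_t zeroPoints[2] = {0, 0};
uint32_t numBits[2] = {8, 8};
OH_NNQuantParam_SetScales(quantParams, scales, 2);
OH_NNQuantParam_SetZeroPoints(quantParams, zeroPoints, 2);
OH_NNQuantParam_SetNumBits(quantParams, numBits, 2);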
@ -322,6 +325,283 @@ OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model,
const bool **isSupported,
uint32_t *opCount);
/**
* @brief Adds a tensor to a model instance.
*
* The data node and operator parameters in the Neural Network Runtime model are composed of tensors of the model.
* This method is used to add tensors to a model instance based on the <b>tensor</b> parameter.
* The sequence of adding tensors is specified by the index value recorded in the model.
* The {@link OH_NNModel_SetTensorData}, {@link OH_NNModel_AddOperation},
* and {@link OH_NNModel_SpecifyInputsAndOutputs} methods specify tensors based on the index value.\n
*
* Neural Network Runtime supports inputs and outputs of the dynamic shape. When adding a data node with a dynamic
* shape, you need to set the dimensions that support dynamic changes in <b>tensor.dimensions</b> to <b>-1</b>.
* For example, if <b>tensor.dimensions</b> of a four-dimensional tensor is set to <b>[1, -1, 2, 2]</b>,
* the second dimension supports dynamic changes.\n
*
* @param model Pointer to the {@link OH_NNModel} instance.
* @param tensor Pointer to the {@link OH_NN_Tensor} tensor. The tensor specifies the attributes of the tensor added to
* the model instance.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNModel_AddTensorToModel}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor);
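
A sketch of adding the dynamic-shape tensor the comment describes; designated initializers are used so the {@link OH_NN_Tensor} field names, taken from neural_network_runtime_type.h as this editor understands it, are explicit:

int32_t dims[4] = {1, -1, 2, 2};  /* the second dimension is dynamic */
OH_NN_Tensor tensor = {
    .dataType = OH_NN_FLOAT32,
    .dimensionCount = 4,
    .dimensions = dims,
    .quantParam = NULL,  /* no quantization */
    .type = OH_NN_TENSOR /* a data tensor rather than an operator parameter */
};
OH_NN_ReturnCode ret = OH_NNModel_AddTensor(model, &tensor);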
/**
* @brief Sets the single input data for a model.
*
* This method copies the data whose length is specified by <b>length</b> (in bytes) in <b>dataBuffer</b> to the shared
* memory of the underlying device. <b>inputIndex</b> specifies the input to be set and <b>tensor</b> sets information
* such as the input shape, type, and quantization parameters.\n
*
* Neural Network Runtime supports models with dynamic shape input. For fixed shape input and dynamic shape input
* scenarios, this method uses different processing policies.\n
*
* - Fixed shape input: The attributes of <b>tensor</b> must be the same as those of the tensor added by calling
* {@link OH_NNModel_AddTensor} in the composition phase.
* - Dynamic shape input: In the composition phase, because the shape is not fixed, each value in
* <b>tensor.dimensions</b> must be greater than <b>0</b> in the method calls to determine the shape input in the
* calculation phase. When setting the shape, you can modify only the dimension whose value is <b>-1</b>.
* Assume that <b>[-1, 224, 224, 3]</b> is input as the dimension of A in the composition phase.
* When this method is called, only the size of the first dimension can be modified, e.g. to <b>[3, 224, 224, 3]</b>.
* If other dimensions are adjusted, {@link OH_NN_INVALID_PARAMETER} is returned.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence of the data input when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* In input settings, the index value for the three inputs is <b>{0, 1, 2}</b>.\n
* @param tensor Sets the tensor corresponding to the input data.
* @param dataBuffer Pointer to the input data.
* @param length Length of the data buffer, in bytes.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor,
uint32_t inputIndex,
const OH_NN_Tensor *tensor,
const void *dataBuffer,
size_t length);
/**
* @brief Sets the buffer for a single output of a model.
*
* This method binds the buffer to which <b>dataBuffer</b> points to the output specified by <b>outputIndex</b>.
* The length of the buffer is specified by <b>length</b>.\n
*
* After {@link OH_NNExecutor_Run} is called to complete a single model inference, Neural Network Runtime compares
* the length of the buffer to which <b>dataBuffer</b> points with the length of the output data and returns different
* results based on the actual situation.\n
*
* - If the buffer length is greater than or equal to the data length, the inference result is copied to the buffer and
* {@link OH_NN_SUCCESS} is returned. You can read the inference result from <b>dataBuffer</b>.
* - If the buffer length is smaller than the data length, {@link OH_NNExecutor_Run} returns
* {@link OH_NN_INVALID_PARAMETER} and generates a log indicating that the buffer is too small.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output Index value, which is in the same sequence of the data output when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* In output buffer settings, the index value for the three outputs is <b>{0, 1, 2}</b>.
* @param dataBuffer Pointer to the output data.
* @param length Length of the data buffer, in bytes.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor,
uint32_t outputIndex,
void *dataBuffer,
size_t length);
/**
* @brief Performs inference.
*
* Performs end-to-end inference and computing of the model on the device associated with the executor.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor);
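
Taken together, the three deprecated calls above formed the pre-API-11 inference loop; a condensed sketch with error handling omitted (inputTensor describes input 0):

OH_NN_ReturnCode ret = OH_NNExecutor_SetInput(executor, 0, &inputTensor, inputData, inputLength);
ret = OH_NNExecutor_SetOutput(executor, 0, outputBuffer, outputLength);
ret = OH_NNExecutor_Run(executor);  /* on success, outputBuffer holds the result of output 0 */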
/**
* @brief Allocates shared memory to a single input on a device.
*
* Neural Network Runtime provides a method for proactively allocating shared memory on a device.
* By specifying the executor and input index value, this method allocates shared memory whose size is specified by
* <b>length</b> on the device associated with a single input and returns the operation result through the
* {@link OH_NN_Memory} instance.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence of the data input when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* In the memory input application, the index value for the three inputs is <b>{0, 1, 2}</b>.
* @param length Memory size to be applied for, in bytes.
* @return Pointer to a {@link OH_NN_Memory} instance, or NULL if it fails to create.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_CreateWithSize}
* @since 9
* @version 1.0
*/
OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length);
/**
* @brief Allocates shared memory to a single output on a device.
*
* Neural Network Runtime provides a method for proactively allocating shared memory on a device.
* By specifying the executor and output index value, this method allocates shared memory whose size is specified by
* <b>length</b> on the device associated with a single output and returns the operation result through the
* {@link OH_NN_Memory} instance.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output index value, which is in the same sequence as the data output when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When output memory is requested, the index value for the three outputs is <b>{0, 1, 2}</b>.
* @param length Size of the memory to request, in bytes.
* @return Pointer to a {@link OH_NN_Memory} instance, or NULL if the creation fails.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_CreateWithSize}
* @since 9
* @version 1.0
*/
OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length);
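/*
 * Example (editor's sketch, not part of the original header): requesting device-shared memory for
 * input 0 and output 0 and filling the input. The data member of {@link OH_NN_Memory} follows its
 * definition in neural_network_runtime_type.h; the helper name and tensor sizes are illustrative
 * assumptions.
 */
#include <string.h>

static OH_NN_ReturnCode PrepareSharedMemory(OH_NNExecutor *executor, const float *sourceData,
                                            OH_NN_Memory **inputMemory, OH_NN_Memory **outputMemory)
{
    size_t inputLength = 3 * 224 * 224 * sizeof(float); /* illustrative input size */
    size_t outputLength = 1000 * sizeof(float);         /* illustrative output size */

    *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, 0, inputLength);
    *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, 0, outputLength);
    if (*inputMemory == NULL || *outputMemory == NULL) {
        return OH_NN_MEMORY_ERROR; /* destroy any memory that was successfully allocated */
    }

    /* Write the input data directly into the device-shared memory. */
    memcpy((*inputMemory)->data, sourceData, inputLength);
    return OH_NN_SUCCESS;
}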
/**
* @brief Releases the input memory to which the {@link OH_NN_Memory} instance points.
*
* This method needs to be called to release the memory instance created by calling
* {@link OH_NNExecutor_AllocateInputMemory}. Otherwise, a memory leak will occur.
* The mapping between <b>inputIndex</b> and <b>memory</b> must be the same as that in memory instance creation.\n
*
* If <b>memory</b> or <b>*memory</b> is a null pointer, this method only prints warning logs and does not execute
* the release logic.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence as the data input when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When input memory is released, the index value for the three inputs is <b>{0, 1, 2}</b>.
* @param memory Double pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed,
* this method sets <b>*memory</b> to a null pointer.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_Destroy}
* @since 9
* @version 1.0
*/
void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory);
/**
* @brief Releases the output memory to which the {@link OH_NN_Memory} instance points.
*
* This method needs to be called to release the memory instance created by calling
* {@link OH_NNExecutor_AllocateOutputMemory}. Otherwise, a memory leak will occur.
* The mapping between <b>outputIndex</b> and <b>memory</b> must be the same as that in memory instance creation.\n
*
* If <b>memory</b> or <b>*memory</b> is a null pointer, this method only prints warning logs and does not execute
* the release logic.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output index value, which is in the same sequence as the data output when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When output memory is released, the index value for the three outputs is <b>{0, 1, 2}</b>.
* @param memory Double pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed,
* this method sets <b>*memory</b> to a null pointer.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_Destroy}
* @since 9
* @version 1.0
*/
void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory);
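/*
 * Example (editor's sketch): releasing the shared memory requested in the sketch above. The index
 * passed here must match the one used at allocation time; on return, both pointers are set to NULL.
 */
static void ReleaseSharedMemory(OH_NNExecutor *executor, OH_NN_Memory **inputMemory, OH_NN_Memory **outputMemory)
{
    OH_NNExecutor_DestroyInputMemory(executor, 0, inputMemory);   /* sets *inputMemory to NULL */
    OH_NNExecutor_DestroyOutputMemory(executor, 0, outputMemory); /* sets *outputMemory to NULL */
}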
/**
* @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory
* used by a single input.
*
* In scenarios where you need to manage the memory yourself, this method binds the execution input to the
* {@link OH_NN_Memory} memory instance. During computing, the underlying device reads the input data from the shared
* memory pointed to by the memory instance. By using this method, input setting, computing, and reading can be
* executed concurrently to improve the inference efficiency of a data flow.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence as the data input when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When the input shared memory is specified, the index value for the three inputs is
* <b>{0, 1, 2}</b>.
* @param tensor Pointer to {@link OH_NN_Tensor}, used to set the tensor corresponding to a single input.
* @param memory Pointer to {@link OH_NN_Memory}.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor,
uint32_t inputIndex,
const OH_NN_Tensor *tensor,
const OH_NN_Memory *memory);
/**
* @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory
* used by a single output.
*
* In scenarios where you need to manage the memory yourself, this method binds the execution output to the
* {@link OH_NN_Memory} memory instance. When computing is performed, the underlying hardware directly writes the
* computing result to the shared memory to which the memory instance points. By using this method, input setting,
* computing, and reading can be executed concurrently to improve the inference efficiency of a data flow.\n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output index value, which is in the same sequence as the data output when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
* {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When the output shared memory is specified, the index value for the three outputs is
* <b>{0, 1, 2}</b>.
* @param memory Pointer to {@link OH_NN_Memory}.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes,
* see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor,
uint32_t outputIndex,
const OH_NN_Memory *memory);
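/*
 * Example (editor's sketch): binding the shared memory from the earlier sketches to the execution and
 * running inference. The OH_NN_Tensor field names follow neural_network_runtime_type.h; the float32
 * data type, the {1, 3, 224, 224} shape, and the single input/output layout are illustrative assumptions.
 */
static OH_NN_ReturnCode RunWithSharedMemory(OH_NNExecutor *executor,
                                            const OH_NN_Memory *inputMemory,
                                            const OH_NN_Memory *outputMemory)
{
    int32_t inputDims[4] = {1, 3, 224, 224};
    OH_NN_Tensor inputTensor = {
        .dataType = OH_NN_FLOAT32,
        .dimensionCount = 4,
        .dimensions = inputDims,
        .quantParam = NULL,
        .type = OH_NN_TENSOR,
    };

    OH_NN_ReturnCode ret = OH_NNExecutor_SetInputWithMemory(executor, 0, &inputTensor, inputMemory);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }
    ret = OH_NNExecutor_SetOutputWithMemory(executor, 0, outputMemory);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }
    /* On success, the device writes the result directly into the output shared memory. */
    return OH_NNExecutor_Run(executor);
}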
#ifdef __cplusplus
}
#endif // __cplusplus
/** @} */
#endif // NEURAL_NETWORK_CORE_H

View File

@ -1,294 +0,0 @@
/*
* Copyright (c) 2022-2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @addtogroup NeuralNeworkRuntime
* @{
*
* @brief Provides APIs of Neural Network Runtime for accelerating the model inference.
*
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 9
* @version 2.0
*/
/**
* @file neural_network_runtime_compat.h
*
* @brief The APIs defined in this file are compatibility APIs from the previous version, and they will be
* deprecated after five versions. You are advised to use the new APIs defined in neural_network_core.h and neural_network_runtime.h.
*
* Note: Currently, the APIs of Neural Network Runtime do not support multi-thread calling. \n
*
* include "neural_network_runtime/neural_network_runtime_compat.h"
* @library libneural_network_runtime.so
* @since 11
* @version 1.0
*/
#ifndef NEURAL_NETWORK_RUNTIME_COMPAT_H
#define NEURAL_NETWORK_RUNTIME_COMPAT_H
#include "neural_network_runtime_type.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Adds a tensor to a model instance.
*
* The data node and operator parameters in the Neural Network Runtime model are composed of tensors of the model.
* This method is used to add tensors to a model instance based on the <b>tensor</b> parameter.
* The sequence of adding tensors is specified by the index value recorded in the model. The {@link OH_NNModel_SetTensorData}, {@link OH_NNModel_AddOperation},
* and {@link OH_NNModel_SpecifyInputsAndOutputs} methods specify tensors based on the index value. \n
*
* Neural Network Runtime supports inputs and outputs of the dynamic shape. When adding a data node with a dynamic shape,
* you need to set the dimensions that support dynamic changes in <b>tensor.dimensions</b> to <b>-1</b>.
* For example, if <b>tensor.dimensions</b> of a four-dimensional tensor is set to <b>[1, -1, 2, 2]</b>, the second dimension supports dynamic changes. \n
*
* @param model Pointer to the {@link OH_NNModel} instance.
* @param tensor Pointer to the {@link OH_NN_Tensor} tensor. The tensor specifies the attributes of the tensor added to the model instance.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned.
* If the operation fails, an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNModel_AddTensorToModel}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor);
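/*
 * Example (editor's sketch): adding a four-dimensional float32 data tensor whose second dimension is
 * dynamic. The OH_NN_Tensor field names follow neural_network_runtime_type.h; the model instance is
 * assumed to have been created with OH_NNModel_Construct.
 */
static OH_NN_ReturnCode AddDynamicTensor(OH_NNModel *model)
{
    int32_t dims[4] = {1, -1, 2, 2}; /* -1 marks the dimension that may change at execution time */
    OH_NN_Tensor tensor = {
        .dataType = OH_NN_FLOAT32,
        .dimensionCount = 4,
        .dimensions = dims,
        .quantParam = NULL,
        .type = OH_NN_TENSOR,
    };
    return OH_NNModel_AddTensor(model, &tensor);
}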
/**
* @brief Sets the single input data for a model.
*
* This method copies the data whose length is specified by <b>length</b> (in bytes) in <b>dataBuffer</b> to the shared memory
* of the underlying device. <b>inputIndex</b> specifies the input to be set and <b>tensor</b> sets information such as the input shape,
* type, and quantization parameters. \n
*
* Neural Network Runtime supports models with dynamic shape input. For fixed shape input and dynamic shape input scenarios,
* this method uses different processing policies. \n
*
* - Fixed shape input: The attributes of <b>tensor</b> must be the same as those of the tensor added by calling
* {@link OH_NNModel_AddTensor} in the composition phase.
* - Dynamic shape input: Because the shape is not fixed in the composition phase, each value in <b>tensor.dimensions</b> passed to
* this method must be greater than <b>0</b> to determine the actual input shape in the calculation phase. When setting the shape,
* you can modify only the dimensions whose value is <b>-1</b>. Assume that <b>[-1, 224, 224, 3]</b> is input as the dimensions of tensor A in the composition phase.
* When this method is called, only the size of the first dimension can be modified, for example, to <b>[3, 224, 224, 3]</b>.
* If other dimensions are adjusted, {@link OH_NN_INVALID_PARAMETER} is returned. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence as the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* In input settings, the index value for the three inputs is <b>{0, 1, 2}</b>. \n
*
* @param tensor Sets the tensor corresponding to the input data.
* @param dataBuffer Pointer to the input data.
* @param length Length of the data buffer, in bytes.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned. If the operation fails,
* an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor,
uint32_t inputIndex,
const OH_NN_Tensor *tensor,
const void *dataBuffer,
size_t length);
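/*
 * Example (editor's sketch): setting input 0 of an executor whose dimensions were declared as
 * [-1, 224, 224, 3] in the composition phase. Only the -1 dimension is changed here; the batch size
 * of 3 and the float32 data type are illustrative assumptions.
 */
static OH_NN_ReturnCode SetDynamicInput(OH_NNExecutor *executor, const float *inputData)
{
    int32_t runtimeDims[4] = {3, 224, 224, 3}; /* concrete shape chosen for this execution */
    OH_NN_Tensor inputTensor = {
        .dataType = OH_NN_FLOAT32,
        .dimensionCount = 4,
        .dimensions = runtimeDims,
        .quantParam = NULL,
        .type = OH_NN_TENSOR,
    };
    size_t length = 3 * 224 * 224 * 3 * sizeof(float);
    return OH_NNExecutor_SetInput(executor, 0, &inputTensor, inputData, length);
}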
/**
* @brief Sets the buffer for a single output of a model.
*
* This method binds the buffer to which <b>dataBuffer</b> points to the output specified by <b>outputIndex</b>.
* The length of the buffer is specified by <b>length</b>. \n
*
* After {@link OH_NNExecutor_Run} is called to complete a single model inference, Neural Network Runtime compares
* the length of the buffer to which <b>dataBuffer</b> points with the length of the output data and returns different results
* based on the actual situation. \n
*
* - If the buffer length is greater than or equal to the data length, the inference result is copied to the buffer and
* {@link OH_NN_SUCCESS} is returned. You can read the inference result from <b>dataBuffer</b>.
* - If the buffer length is smaller than the data length, {@link OH_NNExecutor_Run} returns {@link OH_NN_INVALID_PARAMETER}
* and generates a log indicating that the buffer is too small. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output index value, which is in the same sequence as the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs}
* is called. In output buffer settings, the index value for the three outputs is <b>{0, 1, 2}</b>.
* @param dataBuffer Pointer to the output data.
* @param length Length of the data buffer, in bytes.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned. If the operation fails,
* an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor,
uint32_t outputIndex,
void *dataBuffer,
size_t length);
/**
* @brief Performs inference.
*
* Performs end-to-end inference and computing of the model on the device associated with the executor. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned. If the operation fails,
* an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor);
/**
* @brief Allocates shared memory to a single input on a device.
*
* Neural Network Runtime provides a method for proactively allocating shared memory on a device. By specifying the executor and input index value,
* this method allocates shared memory whose size is specified by <b>length</b> on the device associated with a single input and returns the
* operation result through the {@link OH_NN_Memory} instance. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence as the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When input memory is requested, the index value for the three inputs is <b>{0, 1, 2}</b>.
* @param length Size of the memory to request, in bytes.
* @return Pointer to a {@link OH_NN_Memory} instance, or NULL if the creation fails.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_CreateWithSize}
* @since 9
* @version 1.0
*/
OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length);
/**
* @brief Allocates shared memory to a single output on a device.
*
* Neural Network Runtime provides a method for proactively allocating shared memory on a device. By specifying the executor and
* output index value, this method allocates shared memory whose size is specified by <b>length</b> on the device associated with
* a single output and returns the operation result through the {@link OH_NN_Memory} instance. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output index value, which is in the same sequence as the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When output memory is requested, the index value for the three outputs is <b>{0, 1, 2}</b>.
* @param length Size of the memory to request, in bytes.
* @return Pointer to a {@link OH_NN_Memory} instance, or NULL if the creation fails.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_CreateWithSize}
* @since 9
* @version 1.0
*/
OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length);
/**
* @brief Releases the input memory to which the {@link OH_NN_Memory} instance points.
*
* This method needs to be called to release the memory instance created by calling {@link OH_NNExecutor_AllocateInputMemory}.
* Otherwise, a memory leak will occur.
* The mapping between <b>inputIndex</b> and <b>memory</b> must be the same as that in memory instance creation. \n
*
* If <b>memory</b> or <b>*memory</b> is a null pointer, this method only prints warning logs and does not execute the release logic. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence as the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When input memory is released, the index value for the three inputs is <b>{0, 1, 2}</b>.
* @param memory Double pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed, this method sets <b>*memory</b> to a null pointer.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_Destroy}
* @since 9
* @version 1.0
*/
void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory);
/**
* @brief Releases the output memory to which the {@link OH_NN_Memory} instance points.
*
* This method needs to be called to release the memory instance created by calling {@link OH_NNExecutor_AllocateOutputMemory}. Otherwise, a memory leak will occur.
* The mapping between <b>outputIndex</b> and <b>memory</b> must be the same as that in memory instance creation. \n
*
* If <b>memory</b> or <b>*memory</b> is a null pointer, this method only prints warning logs and does not execute the release logic. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output index value, which is in the same sequence as the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When output memory is released, the index value for the three outputs is <b>{0, 1, 2}</b>.
* @param memory Double pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed, this method sets <b>*memory</b> to a null pointer.
* @deprecated since 11
* @useinstead {@link OH_NNTensor_Destroy}
* @since 9
* @version 1.0
*/
void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory);
/**
* @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory used by a single input.
*
* In scenarios where you need to manage the memory yourself, this method binds the execution input to the {@link OH_NN_Memory} memory instance.
* During computing, the underlying device reads the input data from the shared memory pointed to by the memory instance.
* By using this method, input setting, computing, and reading can be executed concurrently to improve the inference efficiency of a data flow. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param inputIndex Input index value, which is in the same sequence as the data input when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When the input shared memory is specified, the index value for the three inputs is <b>{0, 1, 2}</b>.
* @param tensor Pointer to {@link OH_NN_Tensor}, used to set the tensor corresponding to a single input.
* @param memory Pointer to {@link OH_NN_Memory}.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned. If the operation fails,
* an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor,
uint32_t inputIndex,
const OH_NN_Tensor *tensor,
const OH_NN_Memory *memory);
/**
* @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory used by a single output.
*
* In scenarios where you need to manage the memory yourself, this method binds the execution output to the {@link OH_NN_Memory} memory instance.
* When computing is performed, the underlying hardware directly writes the computing result to the shared memory to which the memory instance points.
* By using this method, input setting, computing, and reading can be executed concurrently to improve the inference efficiency of a data flow. \n
*
* @param executor Pointer to the {@link OH_NNExecutor} instance.
* @param outputIndex Output index value, which is in the same sequence as the data output when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
* When output shared memory is specified, the index value for the three outputs is <b>{0, 1, 2}</b>.
* @param memory Pointer to {@link OH_NN_Memory}.
* @return Execution result of the function. If the operation is successful, <b>OH_NN_SUCCESS</b> is returned. If the operation fails,
* an error code is returned. For details about the error codes, see {@link OH_NN_ReturnCode}.
* @deprecated since 11
* @useinstead {@link OH_NNExecutor_RunSync}
* @since 9
* @version 1.0
*/
OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor,
uint32_t outputIndex,
const OH_NN_Memory *memory);
#ifdef __cplusplus
}
#endif // __cplusplus
/** @} */
#endif // NEURAL_NETWORK_RUNTIME_COMPAT_H

View File

@ -19,7 +19,6 @@
*
* @brief Provides APIs for accelerating the Neural Network Runtime model inference.
*
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 9
* @version 2.0
*/
@ -31,6 +30,8 @@
*
* include "neural_network_runtime/neural_network_runtime_type.h"
* @library libneural_network_runtime.so
* @kit Neural Network Runtime Kit
* @Syscap SystemCapability.Ai.NeuralNetworkRuntime
* @since 9
* @version 2.0
*/
@ -187,29 +188,41 @@ typedef enum {
/**
 * @brief Defines the callback function handle for the post-process when the asynchronous execution has been done.
 *
 * Use <b>userData</b> to identify the asynchronous execution you want to get.
 * It is the argument <b>userData</b> passed to {@link OH_NNExecutor_RunAsync}.\n
 *
 * Use <b>errCode</b> of type {@link OH_NN_ReturnCode} to get the error code returned by the asynchronous execution.\n
 *
 * The <b>outputTensor</b> and <b>outputCount</b> are the inference results, which are the same as the ones passed to
 * {@link OH_NNExecutor_RunAsync}.\n
 *
 * @param userData Asynchronous execution identifier, which is the argument <b>userData</b> passed to
 * {@link OH_NNExecutor_RunAsync}.
 * @param errCode Error code {@link OH_NN_ReturnCode} returned by the asynchronous execution.
 * @param outputTensor An array of output tensors {@link NN_Tensor} of the model, which is the same as the argument
 * <b>outputTensor</b> passed to {@link OH_NNExecutor_RunAsync}.
 * @param outputCount Output tensor count, which is the same as the argument <b>outputCount</b> passed to
 * {@link OH_NNExecutor_RunAsync}.
 * @since 11
 * @version 1.0
 */
typedef void (*NN_OnRunDone)(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount);
/**
 * @brief Defines the callback function handle for the post-process when the device driver service is dead during
 * asynchronous execution.
 *
 * You should recompile the model if this callback function is called.\n
 *
 * Use <b>userData</b> to identify the asynchronous execution you want to get.
 * It is the argument <b>userData</b> passed to {@link OH_NNExecutor_RunAsync}.\n
 *
 * @param userData Asynchronous execution identifier, which is the argument <b>userData</b> passed to
 * {@link OH_NNExecutor_RunAsync}.
 * @since 11
 * @version 1.0
 */
typedef void (*NN_OnServiceDied)(void *userData);
/**
* @brief Defines activation function types in the fusion operator.