Update HATs for NNRt HDI v2.0

Signed-off-by: yuhanshi <shiyuhan1@huawei.com>
yuhanshi 2023-04-11 18:30:55 +08:00
parent 96a1ab118d
commit b773591a93
25 changed files with 2126 additions and 61 deletions

View File

@ -20,7 +20,10 @@ group("HatsHdfNnrtTest") {
if (xts_hats_rich == false) {
if (is_standard_system) {
if (xts_hats_nnrt) {
deps = [ "hdi:hdi" ]
deps = [
"hdi/v1_0:hdi",
"hdi/v2_0:hdi",
]
}
}
}

View File

@ -16,7 +16,7 @@ import("//test/xts/tools/build/suite.gni")
group("hdi") {
testonly = true
deps = [
"nnrtFunctionTest:HatsHdfNnrtFunctionTest",
"nnrtStabilityTest:HatsHdfNnrtStabilityTest",
"nnrtFunctionTest:HatsHdfNnrtFunctionV1_0Test",
"nnrtStabilityTest:HatsHdfNnrtStabilityV1_0Test",
]
}

View File

@ -104,7 +104,7 @@ void HDICommon::BuildAddGraphDynamic(OH_NNModel **model)
}
OH_NN_ReturnCode HDICommon::ConvertModel(OHOS::sptr<V1_0::INnrtDevice> device_, OH_NNModel *model,
V1_0::SharedBuffer &tensorBuffer, V1_0::Model **iModel)
OHOS::HDI::Nnrt::V1_0::SharedBuffer &tensorBuffer, V1_0::Model **iModel)
{
printf("[NNRtTest] [ConvertModel] convert OH_NNModel to V1_0::Model\n");
auto *innerModel = reinterpret_cast<InnerModel *>(model);
@ -140,7 +140,7 @@ OH_NN_ReturnCode HDICommon::ConvertModel(OHOS::sptr<V1_0::INnrtDevice> device_,
V1_0::IOTensor HDICommon::CreateIOTensor(OHOS::sptr<V1_0::INnrtDevice> &device)
{
V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
OHOS::HDI::Nnrt::V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
int ret = device->AllocateBuffer(ADDEND_BUFFER_LENGTH, buffer);
if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
printf("[NNRtTest] [CreateIOTensor] allocate buffer error. ret: %d, fd: %d\n", ret, buffer.fd);
@ -159,7 +159,7 @@ V1_0::IOTensor HDICommon::CreateInputIOTensor(OHOS::sptr<V1_0::INnrtDevice> &dev
std::cout << "The length param is invalid, length=0" << std::endl;
}
V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
OHOS::HDI::Nnrt::V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
auto ret = device->AllocateBuffer(length, buffer);
if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
@ -187,7 +187,7 @@ V1_0::IOTensor HDICommon::CreateOutputIOTensor(OHOS::sptr<V1_0::INnrtDevice> &de
printf("[NNRtTest] The length param is invalid, length=0");
}
V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
OHOS::HDI::Nnrt::V1_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
int ret = device->AllocateBuffer(length, buffer);
if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {

View File

@ -20,7 +20,7 @@ config("nnrtTest_config") {
visibility = [ ":*" ]
}
ohos_moduletest_suite("HatsHdfNnrtFunctionTest") {
ohos_moduletest_suite("HatsHdfNnrtFunctionV1_0Test") {
testonly = true
module_out_path = module_output_path
sources = [
@ -31,12 +31,12 @@ ohos_moduletest_suite("HatsHdfNnrtFunctionTest") {
]
include_dirs = [
"../../hdi",
"../../v1_0",
"//commonlibrary/c_utils/base/include",
"//third_party/googletest/googletest/include/",
"//foundation/ai/neural_network_runtime",
"//foundation/ai/neural_network_runtime/third_party/include",
"//third_party/mindspore/mindspore/lite/mindir/include",
"//third_party/mindspore/mindspore-src/source/mindspore/lite/mindir/include",
]
deps = [ "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime" ]
@ -53,4 +53,4 @@ ohos_moduletest_suite("HatsHdfNnrtFunctionTest") {
cflags = [ "-Wno-error" ]
public_configs = [ ":nnrtTest_config" ]
}
}

View File

@ -22,7 +22,7 @@ config("nnrtTest_config") {
visibility = [ ":*" ]
}
ohos_moduletest_suite("HatsHdfNnrtStabilityTest") {
ohos_moduletest_suite("HatsHdfNnrtStabilityV1_0Test") {
testonly = true
module_out_path = module_output_path
sources = [
@ -31,12 +31,12 @@ ohos_moduletest_suite("HatsHdfNnrtStabilityTest") {
]
include_dirs = [
"../../hdi",
"../../v1_0",
"//commonlibrary/c_utils/base/include",
"//third_party/googletest/googletest/include/",
"//foundation/ai/neural_network_runtime",
"//foundation/ai/neural_network_runtime/third_party/include",
"//third_party/mindspore/mindspore/lite/mindir/include",
"//third_party/mindspore/mindspore-src/source/mindspore/lite/mindir/include",
]
deps = [ "//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime" ]
@ -53,4 +53,4 @@ ohos_moduletest_suite("HatsHdfNnrtStabilityTest") {
cflags = [ "-Wno-error" ]
public_configs = [ ":nnrtTest_config" ]
}
}

View File

@ -149,53 +149,32 @@ HWTEST_F(StabilityTest, SUB_AI_NNRt_Reliability_South_Stress_0100, Reliability |
HWTEST_F(StabilityTest, SUB_AI_NNR_Reliability_South_Stress_0200, Reliability | MediumTest | Level2)
{
OHOS::sptr<V1_0::INnrtDevice> device = V1_0::INnrtDevice::Get();
std::vector<V1_0::Model *> iModels;
std::vector<OHOS::sptr<V1_0::IPreparedModel>> iPreparedModels;
std::vector<V1_0::SharedBuffer> tensorBuffers;
for (int i = 0; i < THREAD_NUM; i++) {
// build graph with NNModel
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
// convert NNModel to V1_0::Model
V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
V1_0::Model *iModel = nullptr;
auto retConvert = HDICommon::ConvertModel(device, model, tensorBuffer, &iModel);
EXPECT_EQ(OH_NN_SUCCESS, retConvert) << "ConvertModel failed";
if (retConvert != OH_NN_SUCCESS) {
break;
}
iModels.emplace_back(iModel);
tensorBuffers.emplace_back(tensorBuffer);
// prepare model
OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
V1_0::ModelConfig config = {
.enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
auto retPrepare = device->PrepareModel(*iModel, config, iPreparedModel);
EXPECT_EQ(HDF_SUCCESS, retPrepare) << "PrepareModel failed";
if (retPrepare != HDF_SUCCESS) {
break;
}
iPreparedModels.emplace_back(iPreparedModel);
}
// build graph with NNModel
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
// convert NNModel to V1_0::Model
V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
V1_0::Model *iModel = nullptr;
auto retConvert = HDICommon::ConvertModel(device, model, tensorBuffer, &iModel);
ASSERT_EQ(OH_NN_SUCCESS, retConvert) << "ConvertModel failed";
// prepare model
OHOS::sptr<V1_0::IPreparedModel> iPreparedModel;
V1_0::ModelConfig config = {
.enableFloat16 = false, .mode = V1_0::PERFORMANCE_EXTREME, .priority = V1_0::PRIORITY_HIGH};
auto retPrepare = device->PrepareModel(*iModel, config, iPreparedModel);
ASSERT_EQ(HDF_SUCCESS, retPrepare) << "PrepareModel failed";
// run model
for (int i = 0; i < STRESS_COUNT; i++) {
// create threads to run model
std::vector<std::thread> threads;
for (auto &iPreparedModel : iPreparedModels) {
threads.emplace_back(std::thread(RunModelTest, device, iPreparedModel));
}
// wait for thread finish
for (auto &th : threads) {
th.join();
}
if (i % PRINT_FREQ == 0) {
printf("[NnrtTest] SUB_AI_NNRt_Reliability_South_Stress_0200 times: %d/%d\n", i, STRESS_COUNT);
}
}
for (size_t i=0; i< iModels.size(); i++) {
mindspore::lite::MindIR_Model_Destroy(&iModels[i]);
if (tensorBuffers[i].fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffers[i]));
ASSERT_NO_FATAL_FAILURE(RunModelTest(device, iPreparedModel));
if (i % PRINT_FREQ == 0) {
printf("[NnrtTest] SUB_AI_NNRt_Reliability_South_Stress_0200 times: %d/%d\n", i, STRESS_COUNT);
}
}
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
ASSERT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}

ai/nnrt/hdi/v2_0/BUILD.gn (new file, 22 lines)
View File

@ -0,0 +1,22 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//build/ohos_var.gni")
import("//test/xts/tools/build/suite.gni")
group("hdi") {
testonly = true
deps = [
"nnrtFunctionTest:HatsHdfNnrtFunctionV2_0Test",
"nnrtStabilityTest:HatsHdfNnrtStabilityV2_0Test",
]
}

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HDI_NNRT_TEST_H
#define HDI_NNRT_TEST_H
#include <string>
#include <vector>
#include <v2_0/innrt_device.h>
#include "gtest/gtest.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "frameworks/native/memory_manager.h"
namespace V2_0 = OHOS::HDI::Nnrt::V2_0;
namespace OHOS::NeuralNetworkRuntime::Test {
class HDINNRtTest : public testing::Test {
public:
// device ptr
OHOS::sptr<V2_0::INnrtDevice> device_;
std::vector<void*> buffers_;
void SetUp() override
{
device_ = V2_0::INnrtDevice::Get();
if (device_ == nullptr) {
// std::cout << "Get HDI device failed." << std::endl;
GTEST_SKIP() << "Get HDI device failed.";
}
}
void TearDown() override
{
device_.clear();
}
};
} // namespace OHOS::NeuralNetworkRuntime::Test
#endif // HDI_NNRT_TEST_H

View File

@ -0,0 +1,295 @@
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sstream>
#include <algorithm>
#include <v2_0/nnrt_types.h>
#include "securec.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "frameworks/native/inner_model.h"
#include "frameworks/native/memory_manager.h"
#include "hdi_nnrt_test_utils.h"
namespace OHOS::NeuralNetworkRuntime::Test {
void HDICommon::BuildAddGraph(OH_NNModel **model)
{
printf("[NNRtTest] BuildAddGraph start.\n");
// create OH_NNModel
*model = OH_NNModel_Construct();
ASSERT_NE(nullptr, *model);
// add inputs of Add operation
int32_t dimensions[3]{3, 2, 2};
OH_NN_Tensor firstAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
OH_NN_Tensor secondAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
uint32_t inputIndicesValue[2]{0, 1};
OH_NN_UInt32Array inputIndices{inputIndicesValue, 2};
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &firstAddend));
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &secondAddend));
// Add activation type and set value
OH_NN_Tensor activationType{OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
int8_t activationValue{0};
uint32_t paramIndicesValue{2};
OH_NN_UInt32Array paramIndices{&paramIndicesValue, 1};
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &activationType));
int opCnt = 2;
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorData(*model, opCnt, (void *)&activationValue, sizeof(int8_t)));
// add output of Add operation
OH_NN_Tensor output{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
uint32_t outputIndicesValue{3};
OH_NN_UInt32Array outputIndices{&outputIndicesValue, 1};
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &output));
// add Add operation to model
ASSERT_EQ(OH_NN_SUCCESS,
OH_NNModel_AddOperation(*model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices));
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SpecifyInputsAndOutputs(*model, &inputIndices, &outputIndices));
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(*model));
printf("[NNRtTest] BuildAddGraph done.\n");
}
void HDICommon::BuildAddGraphDynamic(OH_NNModel **model)
{
printf("[NNRtTest] BuildAddGraphDynamic start.\n");
// create OH_NNModel
*model = OH_NNModel_Construct();
ASSERT_NE(nullptr, *model);
// add inputs of Add operation
int32_t dimensions[3]{-1, -1, -1};
OH_NN_Tensor firstAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
OH_NN_Tensor secondAddend{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
uint32_t inputIndicesValue[2]{0, 1};
OH_NN_UInt32Array inputIndices{inputIndicesValue, 2};
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &firstAddend));
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &secondAddend));
// Add activation type and set value
OH_NN_Tensor activationType{OH_NN_INT8, 0, nullptr, nullptr, OH_NN_ADD_ACTIVATIONTYPE};
int8_t activationValue{OH_NN_FUSED_NONE};
uint32_t paramIndicesValue{2};
OH_NN_UInt32Array paramIndices{&paramIndicesValue, 1};
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &activationType));
int opCnt = 2;
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SetTensorData(*model, opCnt, (void *)&activationValue, sizeof(int8_t)));
// add output of Add operation
OH_NN_Tensor output{OH_NN_FLOAT32, 3, dimensions, nullptr, OH_NN_TENSOR};
uint32_t outputIndicesValue{3};
OH_NN_UInt32Array outputIndices{&outputIndicesValue, 1};
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_AddTensor(*model, &output));
// add Add operation to model
ASSERT_EQ(OH_NN_SUCCESS,
OH_NNModel_AddOperation(*model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices));
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_SpecifyInputsAndOutputs(*model, &inputIndices, &outputIndices));
ASSERT_EQ(OH_NN_SUCCESS, OH_NNModel_Finish(*model));
printf("[NNRtTest] BuildAddGraphDynamic done\n");
}
OH_NN_ReturnCode HDICommon::ConvertModel(OHOS::sptr<V2_0::INnrtDevice> device_, OH_NNModel *model,
OHOS::HDI::Nnrt::V2_0::SharedBuffer &tensorBuffer, V2_0::Model **iModel)
{
printf("[NNRtTest] [ConvertModel] convert OH_NNModel to V2_0::Model\n");
auto *innerModel = reinterpret_cast<InnerModel *>(model);
std::shared_ptr<mindspore::lite::LiteGraph> m_liteGraph = innerModel->GetLiteGraphs();
if (m_liteGraph == nullptr) {
printf("[NNRtTest] Model is nullptr, cannot query supported operation.");
return OH_NN_NULL_PTR;
}
size_t tensorSize = mindspore::lite::MindIR_LiteGraph_GetConstTensorSize(m_liteGraph.get());
std::cout << "[ConvertModel] const tensorsize:" << tensorSize << std::endl;
int32_t hdiRet{0};
if (tensorSize > 0) {
hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
if (hdiRet != HDF_SUCCESS || tensorBuffer.fd == NNRT_INVALID_FD) {
printf("[NNRtTest] [ConvertModel] allocate tensor buffer failed after get const tensor size,"\
"ret:%d\n", hdiRet);
return OH_NN_FAILED;
}
}
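// convert the LiteGraph into an HDI V2_0::Model; constant tensor data is carried in tensorBuffer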
*iModel = mindspore::lite::MindIR_LiteGraph_To_Model_V2_0(m_liteGraph.get(), tensorBuffer);
if (*iModel == nullptr) {
printf("[NNRtTest] Parse litegraph to hdi model failed.\n");
device_->ReleaseBuffer(tensorBuffer);
return OH_NN_FAILED;
}
// release model
OH_NNModel_Destroy(&model);
model = nullptr;
printf("[NNRtTest] [ConvertModel] convert model done\n");
return OH_NN_SUCCESS;
}
V2_0::IOTensor HDICommon::CreateIOTensor(OHOS::sptr<V2_0::INnrtDevice> &device)
{
OHOS::HDI::Nnrt::V2_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
int ret = device->AllocateBuffer(ADDEND_BUFFER_LENGTH, buffer);
if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
printf("[NNRtTest] [CreateIOTensor] allocate buffer error. ret: %d, fd: %d\n", ret, buffer.fd);
}
V2_0::IOTensor tensor{.name = "tensor",
.dataType = V2_0::DATA_TYPE_FLOAT32,
.dimensions = TENSOR_DIMS,
.format = V2_0::FORMAT_NHWC,
.data = buffer};
return tensor;
}
V2_0::IOTensor HDICommon::CreateInputIOTensor(OHOS::sptr<V2_0::INnrtDevice> &device, size_t length, float* data)
{
if (length == 0) {
std::cout << "The length param is invalid, length=0" << std::endl;
}
OHOS::HDI::Nnrt::V2_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
auto ret = device->AllocateBuffer(length, buffer);
if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
printf("[NNRtTest] [CreateInputIOTensor] allocate buffer error. ret: %d, fd: %d\n", ret, buffer.fd);
}
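// map the shared buffer into process memory and copy the input data into it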
auto memManager = MemoryManager::GetInstance();
auto memAddress = memManager->MapMemory(buffer.fd, length);
if (memAddress == nullptr) {
printf("[NNRtTest] [CreateInputIOTensor] map fd to address failed.\n");
}
memcpy_s(memAddress, length, data, length);
V2_0::IOTensor tensor{.name = "tensor",
.dataType = V2_0::DATA_TYPE_FLOAT32,
.dimensions = {3, 2, 2},
.format = V2_0::FORMAT_NHWC,
.data = buffer};
return tensor;
}
V2_0::IOTensor HDICommon::CreateOutputIOTensor(OHOS::sptr<V2_0::INnrtDevice> &device, size_t length)
{
if (length == 0) {
printf("[NNRtTest] The length param is invalid, length=0");
}
OHOS::HDI::Nnrt::V2_0::SharedBuffer buffer{NNRT_INVALID_FD, 0, 0, 0};
int ret = device->AllocateBuffer(length, buffer);
if (ret != HDF_SUCCESS || buffer.fd == NNRT_INVALID_FD) {
printf("[NNRtTest] Allocate buffer error. ErrorCode: %d, fd: %d", ret, buffer.fd);
}
V2_0::IOTensor tensor{.name = "tensor",
.dataType = V2_0::DATA_TYPE_FLOAT32,
.dimensions = {3, 2, 2},
.format = V2_0::FORMAT_NHWC,
.data = buffer};
return tensor;
}
void* HDICommon::MapMemory(int fd, size_t length)
{
auto memManager = MemoryManager::GetInstance();
auto memAddress = memManager->MapMemory(fd, length);
if (memAddress == nullptr) {
printf("[NNRtTest] Map fd to address failed.");
return nullptr;
}
return memAddress;
}
void HDICommon::UnmapMemory(float* buffer)
{
auto memManager = MemoryManager::GetInstance();
auto ret = memManager->UnMapMemory(buffer);
if (ret != OH_NN_SUCCESS) {
printf("[NNRtTest] [UnmapMemory] unmap memory failed. ret:%d.\n", ret);
}
}
void HDICommon::SetData(float* buffer, size_t length, float* data)
{
if (buffer == nullptr || data == nullptr) {
printf("[NNRtTest] [SetData] buffer or data is nullprt\n");
return;
}
int ret = memcpy_s(buffer, length, data, length);
if (ret != 0) {
printf("[NNRtTest] [SetData] set data failed, error code: %d\n", ret);
}
}
void HDICommon::ReleaseBufferOfTensors(OHOS::sptr<V2_0::INnrtDevice> &device, std::vector<V2_0::IOTensor> &tensors)
{
if (device == nullptr) {
printf("[NNRtTest] [ReleaseBufferOfTensors] device is nullptr.\n");
return;
}
for (auto &tensor : tensors) {
auto ret = device->ReleaseBuffer(tensor.data);
if (ret != HDF_SUCCESS) {
printf("[NNRtTest] [ReleaseBufferOfTensors] release buffer failed, fd:%d ret:%d.\n", tensor.data.fd, ret);
}
}
}
void HDICommon::UnmapAllMemory(std::vector<void* > &buffers)
{
auto memoryManager = MemoryManager::GetInstance();
for (auto buffer : buffers) {
auto ret = memoryManager->UnMapMemory(buffer);
if (ret != OH_NN_SUCCESS) {
printf("[NNRtTest] [UnmapAllMemory] release buffer failed, ret:%d.\n", ret);
}
}
}
bool CheckExpectOutput(const std::vector<float> &output, const std::vector<float> &expect)
{
if (output.empty() || expect.empty()) {
printf("[NNRtTest] [CheckExpectOutput] output or expect is empty.\n");
return false;
}
int outputSize = output.size();
int expectSize = expect.size();
if (outputSize != expectSize) {
printf("[NNRtTest] [CheckExpectOutput] output size not match: expect:%d, actual:%d\n", outputSize, expectSize);
return false;
}
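// compare element-wise with a small absolute tolerance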
for (int i = 0; i < outputSize; i++) {
if (std::abs(float(output[i]) - float(expect[i])) > 1e-8) {
printf("[NNRtTest] [CheckExpectOutput] output %d not match: expect:%f, actual:%f\n", i, float(expect[i]),
float(output[i]));
return false;
}
}
return true;
}
void PrintTensor(const float *buffer, size_t length)
{
std::stringstream ss;
size_t printNum = std::min(length, PRINT_NUM);
for (size_t i = 0; i < printNum; i++) {
ss << std::to_string(buffer[i]) << " ";
}
printf("[NNRtTest] [data] %s\n", ss.str().c_str());
}
} // namespace OHOS::NeuralNetworkRuntime::Test

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HDI_NNRT_TEST_UTILS_H
#define HDI_NNRT_TEST_UTILS_H
#include <string>
#include <vector>
#include <v2_0/innrt_device.h>
#include <v2_0/nnrt_types.h>
#include "gtest/gtest.h"
#include "mindir_lite_graph.h"
#include "interfaces/kits/c/neural_network_runtime_type.h"
namespace V2_0 = OHOS::HDI::Nnrt::V2_0;
namespace OHOS::NeuralNetworkRuntime::Test {
// invalid file descriptor
const int NNRT_INVALID_FD = -1;
const uint32_t ADDEND_DATA_SIZE = 12;
const uint32_t ADDEND_BUFFER_LENGTH = ADDEND_DATA_SIZE * sizeof(float);
const std::vector<int32_t> TENSOR_DIMS = {3, 2, 2};
const float ADD_VALUE_1 = 1;
const float ADD_VALUE_2 = 2;
const float ADD_VALUE_RESULT = 3;
const size_t PRINT_NUM = 10;
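// common helpers for building, converting, and running models in the NNRt HDI v2.0 tests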
class HDICommon {
public:
static void BuildAddGraph(OH_NNModel **model);
static void BuildAddGraphDynamic(OH_NNModel **model);
static OH_NN_ReturnCode ConvertModel(OHOS::sptr<V2_0::INnrtDevice> device_, OH_NNModel *model,
V2_0::SharedBuffer &tensorBuffer, V2_0::Model **iModel);
static V2_0::IOTensor CreateIOTensor(OHOS::sptr<V2_0::INnrtDevice> &device);
static V2_0::IOTensor CreateInputIOTensor(OHOS::sptr<V2_0::INnrtDevice> &device, size_t length, float* data);
static V2_0::IOTensor CreateOutputIOTensor(OHOS::sptr<V2_0::INnrtDevice> &device, size_t length);
static void* MapMemory(int fd, size_t length);
static void UnmapMemory(float* buffer);
static void UnmapAllMemory(std::vector<void* > &buffers);
static void SetData(float* buffer, size_t length, float* data);
static void ReleaseBufferOfTensors(OHOS::sptr<V2_0::INnrtDevice> &device, std::vector<V2_0::IOTensor> &tensors);
};
bool CheckExpectOutput(const std::vector<float> &output, const std::vector<float> &expect);
void PrintTensor(const float *buffer, size_t length);
} // namespace OHOS::NeuralNetworkRuntime::Test
#endif

View File

@ -0,0 +1,46 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//build/ohos.gni")
import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni")
import("//test/xts/tools/build/suite.gni")
module_output_path = "hats/nnrt"
config("nnrtTest_config") {
visibility = [ ":*" ]
}
ohos_moduletest_suite("HatsHdfNnrtFunctionV2_0Test") {
testonly = true
module_out_path = module_output_path
sources = [
"../common/hdi_nnrt_test_utils.cpp",
"src/hdi_device_test.cpp",
"src/hdi_model_prepare_test.cpp",
"src/hdi_model_run_test.cpp",
]
include_dirs = [ "../../v2_0" ]
external_deps = [
"c_utils:utils",
"drivers_interface_nnrt:libnnrt_proxy_2.0",
"hilog_native:libhilog",
"ipc:ipc_single",
"mindspore:mindir",
"neural_network_runtime:nnrt_target",
]
cflags = [ "-Wno-error" ]
public_configs = [ ":nnrtTest_config" ]
}

View File

@ -0,0 +1,18 @@
{
"kits": [
{
"push": [
"HatsHdfNnrtFunctionTest->/data/local/tmp/HatsHdfNnrtFunctionTest"
],
"type": "PushKit"
}
],
"driver": {
"native-test-timeout": "120000",
"type": "CppTest",
"module-name": "HatsHdfNnrtFunctionTest",
"runtime-hint": "1s",
"native-test-device-path": "/data/local/tmp"
},
"description": "Configuration for HatsHdfNnrtFunctionTest Tests"
}

View File

@ -0,0 +1,305 @@
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <v2_0/nnrt_types.h>
#include <v2_0/innrt_device.h>
#include <v2_0/iprepared_model.h>
#include "gtest/gtest.h"
#include "mindir.h"
#include "mindir_lite_graph.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "common/hdi_nnrt_test_utils.h"
#include "common/hdi_nnrt_test.h"
using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;
namespace {
class DeviceTest : public HDINNRtTest {};
} // namespace
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0100
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0100, Function | MediumTest | Level2)
{
std::string deviceName;
auto hdiRet = device_->GetDeviceName(deviceName);
ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
std::cout << "deviceName:" << deviceName << std::endl;
ASSERT_TRUE(!deviceName.empty());
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0200
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0200, Function | MediumTest | Level2)
{
std::string vendorName;
auto hdiRet = device_->GetVendorName(vendorName);
ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
std::cout << "vendorName:" << vendorName << std::endl;
ASSERT_TRUE(!vendorName.empty());
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0300
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0300, Function | MediumTest | Level1)
{
V2_0::DeviceType deviceType;
auto hdiRet = device_->GetDeviceType(deviceType);
ASSERT_EQ(HDF_SUCCESS, hdiRet);
ASSERT_TRUE(deviceType == V2_0::DeviceType::OTHER || deviceType == V2_0::DeviceType::GPU ||
deviceType == V2_0::DeviceType::CPU || deviceType == V2_0::DeviceType::ACCELERATOR)
<< deviceType;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0400
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0400, Function | MediumTest | Level1)
{
V2_0::DeviceStatus deviceStatus;
auto hdiRet = device_->GetDeviceStatus(deviceStatus);
ASSERT_EQ(HDF_SUCCESS, hdiRet);
ASSERT_TRUE(deviceStatus == V2_0::DeviceStatus::AVAILABLE || deviceStatus == V2_0::DeviceStatus::BUSY ||
deviceStatus == V2_0::DeviceStatus::OFFLINE || deviceStatus == V2_0::DeviceStatus::UNKNOWN)
<< deviceStatus;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0100
* @tc.desc: Run a Float32 model with Float16 precision
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0100, Function | MediumTest | Level2)
{
bool isSupportedFp16;
auto hdiRet = device_->IsFloat16PrecisionSupported(isSupportedFp16);
ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0200
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0200, Function | MediumTest | Level2)
{
bool isSupportedPerformance;
auto hdiRet = device_->IsPerformanceModeSupported(isSupportedPerformance);
ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0300
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0300, Function | MediumTest | Level2)
{
bool isSupportedPriority;
auto hdiRet = device_->IsPrioritySupported(isSupportedPriority);
ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0400
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0400, Function | MediumTest | Level2)
{
bool isSupportedDynamicInput;
auto hdiRet = device_->IsDynamicInputSupported(isSupportedDynamicInput);
ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0500
* @tc.desc:
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0500, Function | MediumTest | Level2)
{
bool isSupportedCache;
auto hdiRet = device_->IsModelCacheSupported(isSupportedCache);
ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Device_ModelSupport_0100
* @tc.name : nodes is empty
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_ModelSupport_0100, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set nodes to empty
iModel->nodes = {};
std::vector<bool> supportedOperations;
EXPECT_EQ(HDF_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
EXPECT_TRUE(supportedOperations.empty());
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Device_ModelSupport_0200
* @tc.name : nodes contains a node whose NodeType is -1
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_ModelSupport_0200, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set the nodeType of every node to an invalid value (-1)
for (auto &node : iModel->nodes) {
node.nodeType = static_cast<V2_0::NodeType>(-1);
}
std::vector<bool> supportedOperations;
EXPECT_EQ(HDF_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
for (uint32_t i = 0; i < supportedOperations.size(); i++) {
EXPECT_EQ(false, supportedOperations[i]);
}
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Device_ModelSupport_0300
* @tc.name : nodes contains a node whose NodeType is 100000
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_ModelSupport_0300, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
for (auto &node : iModel->nodes) {
node.nodeType = static_cast<V2_0::NodeType>(100000);
}
std::vector<bool> supportedOperations;
EXPECT_EQ(HDF_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
for (uint32_t i = 0; i < supportedOperations.size(); i++) {
EXPECT_EQ(false, supportedOperations[i]);
}
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0100
* @tc.desc: length is 1
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0100, Function | MediumTest | Level1)
{
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
size_t tensorSize = 1;
auto hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
EXPECT_TRUE(hdiRet == HDF_SUCCESS) << hdiRet;
EXPECT_TRUE(tensorBuffer.fd != NNRT_INVALID_FD && tensorBuffer.bufferSize == tensorSize)
<< tensorBuffer.fd << tensorBuffer.bufferSize;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0200
* @tc.desc: length is 0
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0200, Function | MediumTest | Level3)
{
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
size_t tensorSize = 0;
auto hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
EXPECT_TRUE(hdiRet != HDF_SUCCESS) << hdiRet;
EXPECT_TRUE(tensorBuffer.fd == NNRT_INVALID_FD) << tensorBuffer.fd;
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0300
* @tc.desc: buffer is empty
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0300, Function | MediumTest | Level2)
{
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
auto hdiRet = device_->ReleaseBuffer(tensorBuffer);
EXPECT_TRUE(hdiRet != HDF_SUCCESS);
}
/**
* @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0400
* @tc.desc: buffer is released successfully
* @tc.type: FUNC
*/
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0400, Function | MediumTest | Level1)
{
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
size_t tensorSize = 224;
auto hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
EXPECT_TRUE(hdiRet == HDF_SUCCESS && tensorBuffer.fd != NNRT_INVALID_FD && tensorBuffer.bufferSize == tensorSize)
<< tensorBuffer.fd << tensorBuffer.bufferSize;
hdiRet = device_->ReleaseBuffer(tensorBuffer);
EXPECT_TRUE(hdiRet == HDF_SUCCESS);
}

View File

@ -0,0 +1,616 @@
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <v2_0/nnrt_types.h>
#include <v2_0/innrt_device.h>
#include <v2_0/iprepared_model.h>
#include "gtest/gtest.h"
#include "mindir.h"
#include "mindir_lite_graph.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "common/hdi_nnrt_test_utils.h"
#include "common/hdi_nnrt_test.h"
using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;
namespace {
class ModelPrepareTest : public HDINNRtTest {};
} // namespace
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_ExportModelCache_0100
* @tc.name : export model cache
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_ExportModelCache_0100, Function | MediumTest | Level1)
{
bool isSupportedCache = false;
EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
if (!isSupportedCache) {
GTEST_SKIP() << "Export cache is not supported.";
}
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig config;
config.enableFloat16 = false;
config.mode = V2_0::PERFORMANCE_NONE;
config.priority = V2_0::PRIORITY_NONE;
// prepared model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, config, iPreparedModel));
// export model cache
std::vector<V2_0::SharedBuffer> modelCache;
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0100
* @tc.name : modelCache is empty
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0100, Function | MediumTest | Level3)
{
bool isSupportedCache = false;
EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
if (!isSupportedCache) {
GTEST_SKIP() << "Export cache is not supported.";
}
V2_0::ModelConfig config;
config.enableFloat16 = false;
config.mode = V2_0::PERFORMANCE_NONE;
config.priority = V2_0::PRIORITY_NONE;
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
std::vector<V2_0::SharedBuffer> modelCache;
// prepared model with empty model cache
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModelFromModelCache(modelCache, config, iPreparedModel));
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0200
* @tc.name : modelCache does not match
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0200, Function | MediumTest | Level3)
{
bool isSupportedCache = false;
EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
if (!isSupportedCache) {
GTEST_SKIP() << "Export cache is not supported.";
}
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig config;
config.enableFloat16 = false;
config.mode = V2_0::PERFORMANCE_NONE;
config.priority = V2_0::PRIORITY_NONE;
// export model cache
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, config, iPreparedModel));
std::vector<V2_0::SharedBuffer> modelCache;
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
// append an invalid buffer to the model cache before preparing from it
OHOS::HDI::Nnrt::V2_0::SharedBuffer invalidBuffer{NNRT_INVALID_FD, 0, 0, 0};
modelCache.emplace_back(invalidBuffer);
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel1;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModelFromModelCache(modelCache, config, iPreparedModel1));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0300
* @tc.name : modelCache is incomplete
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModelFromCache_0300, Function | MediumTest | Level3)
{
bool isSupportedCache = false;
EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isSupportedCache));
if (!isSupportedCache) {
GTEST_SKIP() << "Export cache is not supported.";
}
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig config;
config.enableFloat16 = false;
config.mode = V2_0::PERFORMANCE_NONE;
config.priority = V2_0::PRIORITY_NONE;
// export model cache
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, config, iPreparedModel));
std::vector<V2_0::SharedBuffer> modelCache;
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
// truncate the model cache so it is incomplete before preparing from it
modelCache.resize(size_t(modelCache.size() * 0.9));
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel1;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModelFromModelCache(modelCache, config, iPreparedModel1));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0100
* @tc.name : inputIndex in model is empty
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0100, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set inputIndex to empty
iModel->inputIndex = {};
// prepare model with empty inputIndex
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0200
* @tc.name : outputIndex in model is empty
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0200, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set outputIndex to empty
iModel->outputIndex = {};
// prepare model with empty outputIndex
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0300
* @tc.name : nodes in model is empty
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0300, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set nodes to empty
iModel->nodes = {};
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
// prepare model with empty nodes
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0400
* @tc.name : allTensors in model is empty
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0400, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set Model.allTensors empty
iModel->allTensors = {};
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0500
* @tc.name : DataType of a Tensor is 100000
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0500, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set invalid Tensor.DataType
auto &Tensor = iModel->allTensors[0];
Tensor.dataType = static_cast<V2_0::DataType>(100000);
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
// prepare model
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0600
* @tc.name : Format of a Tensor is 100000
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0600, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set invalid Tensor.Format
auto &Tensor = iModel->allTensors[0];
Tensor.format = static_cast<V2_0::Format>(100000);
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0700
* @tc.name : subGraph in model is empty
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0700, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set empty Model.subGraph
iModel->subGraph = {};
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0800
* @tc.name : subGraph in model has wrong inputs and outputs
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0800, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// set wrong input of subGraph
auto &subGraph = iModel->subGraph[0];
subGraph.inputIndices = {0, 1, 3};
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_NONE, V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_FAILURE, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_0900
* @tc.name : mode in config is PERFORMANCE_NONE-1
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_0900, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig modelConfig{true, static_cast<V2_0::PerformanceMode>(V2_0::PERFORMANCE_NONE - 1),
V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_1000
* @tc.name : mode in config is PERFORMANCE_EXTREME+1
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_1000, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
EXPECT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig modelConfig{true, static_cast<V2_0::PerformanceMode>(V2_0::PERFORMANCE_EXTREME + 1),
V2_0::PRIORITY_NONE};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_1100
* @tc.name : priority in config is PRIORITY_NONE-1
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_1100, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
EXPECT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_EXTREME,
static_cast<V2_0::Priority>(V2_0::PRIORITY_NONE - 1)};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_PreparedModel_1200
* @tc.name : priority in config is PRIORITY_HIGH+1
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_PreparedModel_1200, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig modelConfig{true, V2_0::PERFORMANCE_EXTREME,
static_cast<V2_0::Priority>(V2_0::PRIORITY_HIGH + 1)};
V2_0::sptr<V2_0::IPreparedModel> preparedModel;
EXPECT_EQ(HDF_ERR_INVALID_PARAM, device_->PrepareModel(*iModel, modelConfig, preparedModel));
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_GetInputDimRanges_0100
* @tc.name :
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_GetInputDimRanges_0100, Function | MediumTest | Level2)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
// convert model from OH_NNModel to V2_0::Model
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig modelConfig;
modelConfig.enableFloat16 = false;
modelConfig.mode = V2_0::PERFORMANCE_NONE;
modelConfig.priority = V2_0::PRIORITY_NONE;
// prepared model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
std::vector<std::vector<uint32_t>> minInputsDim;
std::vector<std::vector<uint32_t>> maxInputsDim;
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->GetInputDimRanges(minInputsDim, maxInputsDim));
printf("the value of minInputsDim : ");
for (size_t i = 0; i < minInputsDim.size(); ++i) {
for (size_t j = 0; j < minInputsDim.size(); ++j) {
printf("%u ", minInputsDim[i][j]);
}
}
printf("\n");
printf("the value of maxInputsDim : ");
for (size_t i = 0; i < maxInputsDim.size(); ++i) {
for (size_t j = 0; j < maxInputsDim.size(); ++j) {
printf("%u ", maxInputsDim[i][j]);
}
}
printf("\n");
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_GetInputDimRanges_0200
* @tc.name :
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelPrepareTest, SUB_AI_NNRt_Func_South_Model_GetInputDimRanges_0200, Function | MediumTest | Level2)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraphDynamic(&model);
ASSERT_NE(model, nullptr);
// convert model from OH_NNModel to V2_0::Model
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
V2_0::ModelConfig modelConfig;
modelConfig.enableFloat16 = false;
modelConfig.mode = V2_0::PERFORMANCE_NONE;
modelConfig.priority = V2_0::PRIORITY_NONE;
// prepare model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
std::vector<std::vector<uint32_t>> minInputsDim;
std::vector<std::vector<uint32_t>> maxInputsDim;
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->GetInputDimRanges(minInputsDim, maxInputsDim));
printf("the value of minInputsDim : ");
for (size_t i = 0; i < minInputsDim.size(); ++i) {
for (size_t j = 0; j < minInputsDim.size(); ++j) {
printf("%u ", minInputsDim[i][j]);
}
}
printf("\n");
printf("the value of maxInputsDim : ");
for (size_t i = 0; i < maxInputsDim.size(); ++i) {
for (size_t j = 0; j < maxInputsDim.size(); ++j) {
printf("%u ", maxInputsDim[i][j]);
}
}
printf("\n");
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
}

View File

@ -0,0 +1,421 @@
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <v2_0/nnrt_types.h>
#include <v2_0/innrt_device.h>
#include <v2_0/iprepared_model.h>
#include "gtest/gtest.h"
#include "mindir.h"
#include "mindir_lite_graph.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "frameworks/native/memory_manager.h"
#include "common/hdi_nnrt_test_utils.h"
#include "common/hdi_nnrt_test.h"
using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;
namespace {
class ModelRunTest : public HDINNRtTest {};
void AddModelTest(OHOS::sptr<V2_0::INnrtDevice> &device_, V2_0::ModelConfig &modelConfig, bool isDynamic)
{
OH_NNModel *model = nullptr;
if (isDynamic) {
HDICommon::BuildAddGraphDynamic(&model);
} else {
HDICommon::BuildAddGraph(&model);
}
ASSERT_NE(model, nullptr);
// convert model from OH_NNModel to V2_0::Model
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// prepare model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
std::vector<V2_0::IOTensor> inputs;
std::vector<V2_0::IOTensor> outputs;
std::vector<std::vector<int32_t>> outputsDims;
std::vector<bool> isOutputBufferEnough;
std::vector<void* > mapedMemorys;
// set inputs
std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
for (uint32_t i = 0; i < inputValue.size(); i++) {
std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);
auto tensor = HDICommon::CreateIOTensor(device_);
auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
mapedMemorys.emplace_back(memAddress);
// set input data
HDICommon::SetData((float*)memAddress, ADDEND_BUFFER_LENGTH, (float*)data.data());
inputs.emplace_back(tensor);
}
// set outputs
auto outputTensor = HDICommon::CreateIOTensor(device_);
outputs.emplace_back(outputTensor);
// model run
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));
// map memory to get output buffer
auto memAddress = HDICommon::MapMemory(outputs[0].data.fd, ADDEND_BUFFER_LENGTH);
mapedMemorys.emplace_back(memAddress);
auto buffer = (float *)memAddress;
std::vector<float> expectValue(ADDEND_DATA_SIZE, ADD_VALUE_RESULT);
std::vector<float> outputValue(buffer, buffer + ADDEND_DATA_SIZE);
PrintTensor(buffer, ADDEND_DATA_SIZE);
// check output
EXPECT_TRUE(CheckExpectOutput(outputValue, expectValue)) << "output value check failed.";
// release
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
HDICommon::ReleaseBufferOfTensors(device_, inputs);
HDICommon::ReleaseBufferOfTensors(device_, outputs);
HDICommon::UnmapAllMemory(mapedMemorys);
}
} // namespace
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0200
 * @tc.name   : Run static add model with float16 disabled
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0200, Function | MediumTest | Level1)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_HIGH};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0300
 * @tc.name   : Run add model with float16 precision enabled (fp16)
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0300, Function | MediumTest | Level2)
{
bool isFloat16Supported = false;
EXPECT_EQ(HDF_SUCCESS, device_->IsFloat16PrecisionSupported(isFloat16Supported));
if (!isFloat16Supported) {
GTEST_SKIP() << "Float16 precision is not supported.";
}
V2_0::ModelConfig modelConfig = {
.enableFloat16 = true, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_HIGH};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0400
 * @tc.name   : Run add model with dynamic input shape
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0400, Function | MediumTest | Level2)
{
bool isDynamicInputSupported = false;
EXPECT_EQ(HDF_SUCCESS, device_->IsDynamicInputSupported(isDynamicInputSupported));
if (!isDynamicInputSupported) {
GTEST_SKIP() << "Dynamic input is not supported.";
}
V2_0::ModelConfig modelConfig = {
.enableFloat16 = true, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_HIGH};
AddModelTest(device_, modelConfig, true);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0500
 * @tc.name   : Run model with empty inputs, expect failure
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0500, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// model config
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_HIGH};
// prepared model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
std::vector<V2_0::IOTensor> inputs;
std::vector<V2_0::IOTensor> outputs;
std::vector<std::vector<int32_t>> outputsDims;
std::vector<bool> isOutputBufferEnough;
vector<void* > mapedMemorys;
// only set outputs
auto outputTensor = HDICommon::CreateIOTensor(device_);
outputs.emplace_back(outputTensor);
    // run with empty inputs, expect an error code less than HDF_SUCCESS
EXPECT_GT(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));
// release
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
HDICommon::ReleaseBufferOfTensors(device_, outputs);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_Run_0600
 * @tc.name   : Run model with empty outputs, expect failure
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_Run_0600, Function | MediumTest | Level3)
{
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// model config
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_HIGH};
// prepared model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
std::vector<V2_0::IOTensor> inputs;
std::vector<V2_0::IOTensor> outputs;
std::vector<std::vector<int32_t>> outputsDims;
std::vector<bool> isOutputBufferEnough;
vector<void* > mapedMemorys;
// only set inputs
std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
for (uint32_t i = 0; i < inputValue.size(); i++) {
std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);
auto tensor = HDICommon::CreateIOTensor(device_);
auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
mapedMemorys.emplace_back(memAddress);
// set input data
HDICommon::SetData((float*)memAddress, ADDEND_BUFFER_LENGTH, (float*)data.data());
inputs.emplace_back(tensor);
}
// model run
EXPECT_GT(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));
// release
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
HDICommon::ReleaseBufferOfTensors(device_, inputs);
HDICommon::UnmapAllMemory(mapedMemorys);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0100
 * @tc.name   : Run model with performance mode PERFORMANCE_NONE
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0100, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_NONE, .priority = V2_0::PRIORITY_MEDIUM};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0200
 * @tc.name   : Run model with performance mode PERFORMANCE_LOW
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0200, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_LOW, .priority = V2_0::PRIORITY_MEDIUM};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0300
 * @tc.name   : Run model with performance mode PERFORMANCE_MEDIUM
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0300, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_MEDIUM, .priority = V2_0::PRIORITY_MEDIUM};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0400
 * @tc.name   : Run model with performance mode PERFORMANCE_HIGH
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0400, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_HIGH, .priority = V2_0::PRIORITY_HIGH};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0500
 * @tc.name   : Run model with performance mode PERFORMANCE_EXTREME
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0500, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_LOW};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0600
 * @tc.name   : Run model with priority PRIORITY_NONE
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0600, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_NONE};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0700
 * @tc.name   : Run model with priority PRIORITY_LOW
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0700, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_HIGH, .priority = V2_0::PRIORITY_LOW};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0800
 * @tc.name   : Run model with priority PRIORITY_MEDIUM
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0800, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_MEDIUM};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0900
 * @tc.name   : Run model with priority PRIORITY_HIGH
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_0900, Function | MediumTest | Level2)
{
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_HIGH};
AddModelTest(device_, modelConfig, false);
}
/**
* @tc.number : SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_1000
 * @tc.name   : Prepare model from exported model cache and run
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(ModelRunTest, SUB_AI_NNRt_Func_South_Model_Invoke_CombRun_1000, Function | MediumTest | Level1)
{
bool isModelCacheSupported = false;
EXPECT_EQ(HDF_SUCCESS, device_->IsModelCacheSupported(isModelCacheSupported));
if (!isModelCacheSupported) {
GTEST_SKIP() << "Model cache is not supported.";
}
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
ASSERT_NE(model, nullptr);
V2_0::Model *iModel = nullptr;
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
// model config
V2_0::ModelConfig modelConfig = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_HIGH, .priority = V2_0::PRIORITY_HIGH};
// prepared model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModel(*iModel, modelConfig, iPreparedModel));
// export model cache
std::vector<V2_0::SharedBuffer> modelCache;
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->ExportModelCache(modelCache));
// prepared model from cache
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel1;
EXPECT_EQ(HDF_SUCCESS, device_->PrepareModelFromModelCache(modelCache, modelConfig, iPreparedModel1));
std::vector<V2_0::IOTensor> inputs;
std::vector<V2_0::IOTensor> outputs;
std::vector<std::vector<int32_t>> outputsDims;
std::vector<bool> isOutputBufferEnough;
vector<void* > mapedMemorys;
// set inputs
std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
for (uint32_t i = 0; i < inputValue.size(); i++) {
std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);
auto tensor = HDICommon::CreateIOTensor(device_);
auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
mapedMemorys.emplace_back(memAddress);
// set input data
HDICommon::SetData((float*)memAddress, ADDEND_BUFFER_LENGTH, (float*)data.data());
inputs.emplace_back(tensor);
}
// set outputs
auto outputTensor = HDICommon::CreateIOTensor(device_);
outputs.emplace_back(outputTensor);
// model run
EXPECT_EQ(HDF_SUCCESS, iPreparedModel1->Run(inputs, outputs, outputsDims, isOutputBufferEnough));
// map memory to get output buffer
auto memAddress = HDICommon::MapMemory(outputs[0].data.fd, ADDEND_BUFFER_LENGTH);
mapedMemorys.emplace_back(memAddress);
auto buffer = (float *)memAddress;
std::vector<float> expectValue(ADDEND_DATA_SIZE, ADD_VALUE_RESULT);
std::vector<float> outputValue(buffer, buffer + ADDEND_DATA_SIZE);
// check output
EXPECT_TRUE(CheckExpectOutput(outputValue, expectValue)) << "output value check failed.";
// release
if (tensorBuffer.fd != -1) {
EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
}
HDICommon::ReleaseBufferOfTensors(device_, inputs);
HDICommon::ReleaseBufferOfTensors(device_, outputs);
HDICommon::UnmapAllMemory(mapedMemorys);
}

View File

@@ -0,0 +1,46 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//build/ohos.gni")
import("//drivers/hdf_core/adapter/uhdf2/uhdf.gni")
import("//test/xts/tools/build/suite.gni")
module_output_path = "hats/nnrt"
#nnrt_path = "//drivers/peripheral/nnrt/"
config("nnrtTest_config") {
visibility = [ ":*" ]
}
ohos_moduletest_suite("HatsHdfNnrtStabilityV2_0Test") {
testonly = true
module_out_path = module_output_path
sources = [
"../common/hdi_nnrt_test_utils.cpp",
"src/hdi_stability_test.cpp",
]
include_dirs = [ "../../v2_0" ]
external_deps = [
"c_utils:utils",
"drivers_interface_nnrt:libnnrt_proxy_2.0",
"hilog_native:libhilog",
"ipc:ipc_single",
"mindspore:mindir",
"neural_network_runtime:nnrt_target",
]
cflags = [ "-Wno-error" ]
public_configs = [ ":nnrtTest_config" ]
}

View File

@@ -0,0 +1,18 @@
{
"kits": [
{
"push": [
"HatsHdfNnrtStabilityTest->/data/local/tmp/HatsHdfNnrtStabilityTest"
],
"type": "PushKit"
}
],
"driver": {
"native-test-timeout": "120000",
"type": "CppTest",
"module-name": "HatsHdfNnrtStabilityTest",
"runtime-hint": "1s",
"native-test-device-path": "/data/local/tmp"
},
"description": "Configuration for HatsHdfNnrtStabilityTest Tests"
}

View File

@@ -0,0 +1,180 @@
/*
* Copyright (c) 2022-2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sstream>
#include <vector>
#include <thread>
#include <v2_0/nnrt_types.h>
#include <v2_0/innrt_device.h>
#include <v2_0/iprepared_model.h>
#include "gtest/gtest.h"
#include "mindir.h"
#include "mindir_lite_graph.h"
#include "interfaces/kits/c/neural_network_runtime.h"
#include "frameworks/native/memory_manager.h"
#include "common/hdi_nnrt_test_utils.h"
#include "common/hdi_nnrt_test.h"
using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;
namespace {
// number of thread to create
const int THREAD_NUM = 3;
// number of times to run
const int STRESS_COUNT = 100000;
// number of print frequency
const int PRINT_FREQ = 500;
class StabilityTest : public HDINNRtTest {};
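// Prepares the given V2_0::Model with a default ModelConfig and expects success.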
void PrepareModelTest(OHOS::sptr<V2_0::INnrtDevice> device, V2_0::Model *iModel)
{
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
V2_0::ModelConfig config;
EXPECT_EQ(HDF_SUCCESS, device->PrepareModel(*iModel, config, iPreparedModel));
}
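// Runs the prepared Add model once with two addend inputs and checks that the output matches the expected sum.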
void RunModelTest(OHOS::sptr<V2_0::INnrtDevice> device, OHOS::sptr<V2_0::IPreparedModel> iPreparedModel)
{
std::vector<V2_0::IOTensor> inputs;
std::vector<V2_0::IOTensor> outputs;
std::vector<std::vector<int32_t>> outputsDims;
std::vector<bool> isOutputBufferEnough;
std::vector<void* > mapedMemorys;
// set inputs
std::vector<float> inputValue = {ADD_VALUE_1, ADD_VALUE_2};
for (uint32_t i = 0; i < inputValue.size(); i++) {
std::vector<float> data(ADDEND_DATA_SIZE, inputValue[i]);
auto tensor = HDICommon::CreateIOTensor(device);
auto memAddress = HDICommon::MapMemory(tensor.data.fd, ADDEND_BUFFER_LENGTH);
mapedMemorys.emplace_back(memAddress);
// set input data
HDICommon::SetData((float*)memAddress, ADDEND_BUFFER_LENGTH, (float*)data.data());
inputs.emplace_back(tensor);
}
// set outputs
auto outputTensor = HDICommon::CreateIOTensor(device);
outputs.emplace_back(outputTensor);
// model run
EXPECT_EQ(HDF_SUCCESS, iPreparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough));
// map memory to get output buffer
auto memAddress = HDICommon::MapMemory(outputs[0].data.fd, ADDEND_BUFFER_LENGTH);
mapedMemorys.emplace_back(memAddress);
auto buffer = (float *)memAddress;
std::vector<float> expectValue(ADDEND_DATA_SIZE, ADD_VALUE_RESULT);
std::vector<float> outputValue(buffer, buffer + ADDEND_DATA_SIZE);
// check output
EXPECT_TRUE(CheckExpectOutput(outputValue, expectValue)) << "output value check failed.";
}
} // namespace
/**
* @tc.number : SUB_AI_NNRt_Reliability_South_Stress_0100
 * @tc.name    : Multi-threaded model preparation stress test
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(StabilityTest, SUB_AI_NNRt_Reliability_South_Stress_0100, Reliability | MediumTest | Level2)
{
OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get();
std::vector<V2_0::Model *> iModels;
std::vector<OHOS::sptr<V2_0::IPreparedModel>> iPreparedModels;
std::vector<V2_0::SharedBuffer> tensorBuffers;
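    // build one Add model per worker thread and convert each to a V2_0::Model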
for (int i = 0; i < THREAD_NUM; i++) {
// build graph with NNModel
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
// convert NNModel to V2_0::Model
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
V2_0::Model *iModel = nullptr;
auto retConvert = HDICommon::ConvertModel(device, model, tensorBuffer, &iModel);
EXPECT_EQ(OH_NN_SUCCESS, retConvert) << "ConvertModel failed";
if (retConvert != OH_NN_SUCCESS) {
break;
}
iModels.emplace_back(iModel);
tensorBuffers.emplace_back(tensorBuffer);
}
for (int i = 0; i < STRESS_COUNT; i++) {
// create threads to prepare model
std::vector<std::thread> threads;
for (auto &iModel : iModels) {
threads.emplace_back(std::thread(PrepareModelTest, device, iModel));
}
// wait for thread finish
for (auto &th : threads) {
th.join();
}
if (i % PRINT_FREQ == 0) {
printf("[NnrtTest] SUB_AI_NNRt_Reliability_South_Stress_0100 times: %d/%d\n", i, STRESS_COUNT);
}
}
    for (size_t i = 0; i < iModels.size(); i++) {
        mindspore::lite::MindIR_Model_Destroy(&iModels[i]);
        if (tensorBuffers[i].fd != -1) {
            EXPECT_EQ(HDF_SUCCESS, device->ReleaseBuffer(tensorBuffers[i]));
}
}
}
/**
* @tc.number : SUB_AI_NNR_Reliability_South_Stress_0200
 * @tc.name    : Repeated model run stress test
* @tc.desc : [C- SOFTWARE -0200]
*/
HWTEST_F(StabilityTest, SUB_AI_NNR_Reliability_South_Stress_0200, Reliability | MediumTest | Level2)
{
OHOS::sptr<V2_0::INnrtDevice> device = V2_0::INnrtDevice::Get();
// build graph with NNModel
OH_NNModel *model = nullptr;
HDICommon::BuildAddGraph(&model);
// convert NNModel to V2_0::Model
V2_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
V2_0::Model *iModel = nullptr;
auto retConvert = HDICommon::ConvertModel(device, model, tensorBuffer, &iModel);
ASSERT_EQ(OH_NN_SUCCESS, retConvert) << "ConvertModel failed";
// prepare model
OHOS::sptr<V2_0::IPreparedModel> iPreparedModel;
V2_0::ModelConfig config = {
.enableFloat16 = false, .mode = V2_0::PERFORMANCE_EXTREME, .priority = V2_0::PRIORITY_HIGH};
auto retPrepare = device->PrepareModel(*iModel, config, iPreparedModel);
ASSERT_EQ(HDF_SUCCESS, retPrepare) << "PrepareModel failed";
// run model
for (int i = 0; i < STRESS_COUNT; i++) {
ASSERT_NO_FATAL_FAILURE(RunModelTest(device, iPreparedModel));
if (i % PRINT_FREQ == 0) {
printf("[NnrtTest] SUB_AI_NNRt_Reliability_South_Stress_0200 times: %d/%d\n", i, STRESS_COUNT);
}
}
// release
mindspore::lite::MindIR_Model_Destroy(&iModel);
if (tensorBuffer.fd != -1) {
        ASSERT_EQ(HDF_SUCCESS, device->ReleaseBuffer(tensorBuffer));
}
}