fix cache model size

Signed-off-by: w30052974 <wangyifan94@huawei.com>
This commit is contained in:
w30052974 2024-10-18 11:29:30 +08:00
parent b734414d46
commit 527b02827c
2 changed files with 2 additions and 2 deletions

View File

@@ -755,7 +755,7 @@ NNRT_API OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation)
configContents.push_back('0');
}
configContents["isExceedRamLimit"] = configContents;
configs["isExceedRamLimit"] = configContents;
compilationImpl->compiler->SetExtensionConfig(configs);
bool isBuild = compilationImpl->compiler->IsBuild();

View File

@@ -728,7 +728,7 @@ OH_NN_ReturnCode NNCompiler::SetExtensionConfig(const std::unordered_map<std::st
m_extensionConfig.isNpuFmShared = true;
LOGI("[NNCompiler] SetExtensionConfig NpuFmShared enabled.");
}
if (cofigs.find(EXTENSION_KEY_IS_EXCEED_RAMLIMIT) != configs.end()) {
if (configs.find(EXTENSION_KEY_IS_EXCEED_RAMLIMIT) != configs.end()) {
std::vector<char> value = configs.at(EXTENSION_KEY_IS_EXCEED_RAMLIMIT);
if (value.empty()) {
LOGE("[NNCompiler] SetExtensionConfig get empty model name from configs");