Merge branch 'master' into feature_check_doc

This commit is contained in:
wangcaoyu 2023-11-15 17:49:23 +08:00
commit 241fe156a4
18 changed files with 2639 additions and 561 deletions

View File

@ -131,5 +131,6 @@
{"name": "napi_create_buffer_copy"},
{"name": "napi_create_external_buffer"},
{"name": "napi_get_buffer_info"},
{"name": "napi_queue_async_work_with_qos"}
{"name": "napi_queue_async_work_with_qos"},
{"name": "napi_load_module"}
]

View File

@ -123,6 +123,7 @@ extern "C" {
NAPI_EXTERN napi_status napi_run_script_path(napi_env env, const char* path, napi_value* result);
NAPI_EXTERN napi_status napi_queue_async_work_with_qos(napi_env env, napi_async_work work, napi_qos_t qos);
NAPI_EXTERN napi_status napi_load_module(napi_env env, const char* path, napi_value* result);
#ifdef __cplusplus
}

View File

@ -38,9 +38,36 @@
extern "C" {
#endif
/** Basic application information exposed by the native bundle interfaces. */
struct OH_NativeBundle_ApplicationInfo {
    /**
     * Indicates the name of application
     * @syscap SystemCapability.BundleManager.BundleFramework.Core
     * @since 9
     */
    char* bundleName;
    /**
     * Indicates the fingerprint of application
     * @syscap SystemCapability.BundleManager.BundleFramework.Core
     * @since 9
     */
    char* fingerprint;
    /**
     * Indicates the ID of the application to which this bundle belongs
     * The application ID uniquely identifies an application. It is determined by the bundle name and signature
     * @syscap SystemCapability.BundleManager.BundleFramework.Core
     * @since 11
     */
    char* appId;
    /**
     * Globally unique identifier of an application.
     * AppIdentifier does not change along the application lifecycle, including version updates, certificate changes,
     * public and private key changes, and application transfer.
     * @syscap SystemCapability.BundleManager.BundleFramework.Core
     * @since 11
     */
    char* appIdentifier;
};
/**

View File

@ -1,6 +1,8 @@
1.使用该工具前需要修改[constants.py](./src/utils/constants.py)文件下的StringConstant.LIB_CLANG_PATH和StringConstant.REPLACE_WAREHOUSE
StringConstant.LIB_CLANG_PATH:libclang.dll共享库(本地的)
1.使用该工具前需要修改[constants.py](./src/utils/constants.py)
文件下的StringConstant.LIB_CLG_PATH、StringConstant.REPLACE_WAREHOUSE、StringConstant.INCLUDE_LIB
StringConstant.LIB_CLG_PATH:共享库(本地的)
REPLACE_WAREHOUSE拉下来的interface_sdk_c仓的目录(本地的路径) --例如:(去掉磁盘的路径)\\interface_sdk_c
StringConstant.INCLUDE_LIB# 拉到本地仓的三方库路径
2.环境:
1)python-3.11.4-amd64
@ -11,7 +13,7 @@ REPLACE_WAREHOUSE拉下来的interface_sdk_c仓的目录(本地的路径) --
4)需要把src目录设置为sources root(找到src目录点击右键将目标标记为里面)
5)运行的是src目录下的mian.py文件
5)在interface_sdk_c目录下运行的是src目录下的mian.py文件
3.终端指令
options:

View File

@ -1,14 +1,33 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2023 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from coreImpl.parser import parser
from coreImpl.check import check
class ToolNameType(enum.Enum):
    """Sub-tools selectable via the --tool-name command-line option."""
    COLLECT = 'collect'
    DIFF = 'diff'
    CHECK = 'check'
toolNameTypeSet = [member.value for name,
member in ToolNameType.__members__.items()]
tool_name_type_set = [
member.value for name_tool,
member in ToolNameType.__members__.items()
]
class FormatType(enum.Enum):
@ -16,8 +35,10 @@ class FormatType(enum.Enum):
EXCEL = 'excel'
formatSet = [member.value for name,
member in FormatType.__members__.items()]
format_set = [
member.value for name_format,
member in FormatType.__members__.items()
]
def run_tools(options):
@ -26,6 +47,8 @@ def run_tools(options):
parser.parser(options.parser_path)
elif tool_name == ToolNameType["DIFF"].value:
print("开发中。。。")
elif tool_name == ToolNameType['CHECK'].value:
check.curr_entry(options.parser_path)
else:
print("工具名称错误")
@ -34,5 +57,21 @@ class Config(object):
name = 'parser'
version = '0.1.0'
description = 'Compare the parser the NDKS'
commands = [{"name": "--tool-name", "abbr": "-N", "required": True, "choices": toolNameTypeSet, "type": str, "default": ToolNameType["COLLECT"], "help":"工具名称"},
{"name": "--parser-path", "abbr": "-P", "required": True, "type": str, "help": "解析路径"}]
commands = [
{
"name": "--tool-name",
"abbr": "-N",
"required": True,
"choices": tool_name_type_set,
"type": str,
"default": ToolNameType["COLLECT"],
"help": "工具名称"
},
{
"name": "--parser-path",
"abbr": "-P",
"required": True,
"type": str,
"help": "解析路径"
}
]

View File

@ -0,0 +1,89 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2023 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typedef.check.check import ApiResultInfo, FileDocInfo, OutputTxt
from coreImpl.check.check_doc import process_comment, process_file_doc_info
from coreImpl.check.check_name import check_file_name, check_ndk_name
def process_api_json(api_info, file_doc_info: FileDocInfo, api_result_info_list):
    """Recursively run name and comment checks on one API node and its children.

    Findings are appended to api_result_info_list in place.
    """
    # Naming-convention check for this node.
    api_result_info_list.extend(check_ndk_name(api_info))
    # Doc-comment check runs only when the node carries a comment entry.
    if 'comment' in api_info:
        api_result_info_list.extend(
            process_comment(api_info['comment'], file_doc_info, api_info))
    # Descend into whichever child collection this node uses.
    for child in get_api_info_child(api_info):
        process_api_json(child, file_doc_info, api_result_info_list)
def get_api_info_child(api_info):
    """Return the node's child list, probing keys in priority order.

    Nodes store children under 'children', 'members' or 'parm' depending
    on their kind; an empty list is returned when none of them is present.
    """
    for child_key in ('children', 'members', 'parm'):
        if child_key in api_info:
            return api_info[child_key]
    return []
def process_file_json(file_info, api_result_info_list):
    """Check one parsed file: its name, each top-level API, then file-level docs."""
    # File-name convention check first.
    api_result_info_list.extend(check_file_name(file_info['name']))
    # One FileDocInfo accumulator is shared across every API of the file.
    doc_info = FileDocInfo()
    for api_node in file_info['children']:
        process_api_json(api_node, doc_info, api_result_info_list)
    # File-level documentation checks run after all APIs were visited.
    api_result_info_list.extend(process_file_doc_info(doc_info, file_info))
def process_all_json(python_obj) -> list[ApiResultInfo]:
    """Run all checks over every parsed file and collect the findings."""
    collected = []
    for parsed_file in python_obj:
        process_file_json(parsed_file, collected)
    return collected
def write_in_txt(check_result):
    """Serialize check results to ./Error.txt as a JSON array.

    Each finding becomes an OutputTxt record; a trailing 'api_check: false'
    marker string is appended so consumers can detect that the check failed.
    """
    txt_results = []
    for result in check_result:
        location = '{}(line:{}, col:{})'.format(
            result.location, result.locationLine, result.locationColumn)
        message = 'API check error of [{}]:{}'.format(
            result.errorType['description'], result.errorInfo)
        txt_results.append(OutputTxt(result.errorType['id'], result.level,
                                     location, result.fileName, message))
    txt_results.append('api_check: false')
    # default=obj.__dict__ serializes the OutputTxt records field-by-field.
    result_json = json.dumps(txt_results, default=lambda obj: obj.__dict__, indent=4)
    # with-statement guarantees the handle is closed even if the write raises
    # (the original open/write/close leaked the handle on error).
    with open(r'./Error.txt', 'w', encoding='utf-8') as fs:
        fs.write(result_json)
def curr_entry(file_path):
    """Entry point of the check tool: load parsed data, run checks, write report.

    NOTE(review): file_path is currently unused — the input is read from a
    fixed data.json path; confirm whether file_path should drive the input.
    """
    # Explicit encoding: the original relied on the platform default.
    with open('./src/coreImpl/check/data.json', encoding='utf-8') as json_file:
        python_obj = json.load(json_file)
    check_result = process_all_json(python_obj)
    # Only emit the error report when at least one finding exists.
    if check_result:
        write_in_txt(check_result)
def get_md_files(url) -> list[str]:
    """Read a list file and return its lines (trailing newlines preserved).

    The original read line-by-line without a with-statement, leaking the
    handle if reading raised, and used the platform-default encoding.
    """
    with open(url, 'r', encoding='utf-8') as list_file:
        # Iterating the file yields lines with their newline, matching the
        # original readline loop (which stopped at EOF's empty string).
        return list(list_file)

View File

@ -0,0 +1,100 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2023 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import os.path
import re
from typedef.check.check import ApiResultInfo, ErrorMessage, ErrorType, LogType
def check_large_hump(api_info):
    # UpperCamelCase check (CheckName.LARGE_HUMP) for struct/enum/union names.
    return processing_check_data('LARGE_HUMP', api_info)


def check_function_name(api_info):
    # Function-name check (CheckName.CHECK_FUNCTION_NAME): OH/OS-prefixed
    # segments or UpperCamelCase — see the pattern for the exact rule.
    return processing_check_data('CHECK_FUNCTION_NAME', api_info)


def check_small_hump(api_info):
    # lowerCamelCase check (CheckName.SMALL_HUMP) for variables/params/fields.
    return processing_check_data('SMALL_HUMP', api_info)


def check_all_uppercase_hump(api_info):
    # UPPER_SNAKE_CASE check (CheckName.ALL_UPPERCASE_HUMP) for macros and
    # enum constants.
    return processing_check_data('ALL_UPPERCASE_HUMP', api_info)


def check_global_variable(api_info):
    # g_lowerCamel check (CheckName.GLOBAL_VARIABLE) for global variables.
    return processing_check_data('GLOBAL_VARIABLE', api_info)
def check_file_name(file_path):
    """Validate a header's base name against the FILE_NAME pattern.

    Returns a list holding one level-2 ApiResultInfo on violation,
    or an empty list when the name conforms.
    """
    api_result_info_list = []
    file_name = os.path.basename(file_path)
    result = re.match(CheckName['FILE_NAME'].value, file_name)
    if result is None:
        error_info = ErrorMessage.TRANSLATION_UNIT.value
        api_result_info = ApiResultInfo(ErrorType.NAMING_ERRORS.value, error_info, '')
        api_result_info.set_type(LogType.LOG_FILE.value)
        api_result_info.set_level(2)
        api_result_info_list.append(api_result_info)
    return api_result_info_list
def processing_check_data(function_type, api_info):
    """Match the node's name against the CheckName pattern for function_type.

    Returns a list with one level-2 ApiResultInfo (carrying the node's
    location) when the name violates the convention, else an empty list.
    """
    api_result_info_list = []
    name = api_info['name']
    result = re.match(CheckName[function_type].value, name)
    if result is None:
        # Error message is selected by the node's cursor kind.
        api_result_info = ApiResultInfo(ErrorType.NAMING_ERRORS.value,
                                        ErrorMessage[api_info['kind']].value, name)
        api_result_info.set_location_line(api_info['location']['location_line'])
        api_result_info.set_location_column(api_info['location']['location_column'])
        api_result_info.set_location(api_info['location']['location_path'])
        api_result_info.set_type(LogType.LOG_API.value)
        api_result_info.set_level(2)
        api_result_info.set_file_name(api_info['location']['location_path'])
        api_result_info_list.append(api_result_info)
    return api_result_info_list
class CheckName(enum.Enum):
    """Regex patterns for NDK naming-convention checks (applied via re.match)."""
    # UpperCamelCase: struct/enum/union names.
    LARGE_HUMP = r'^([A-Z][a-z0-9]*)*$'
    # lowerCamelCase: variables, parameters, fields.
    SMALL_HUMP = r'^([a-z][A-Z0-9]*)*$'
    # UPPER_SNAKE_CASE: macros and enum constants.
    ALL_UPPERCASE_HUMP = r'^[A-Z]+[0-9]*([\_][A-Z0-9]+)*$'
    # g_-prefixed lowerCamelCase globals.
    GLOBAL_VARIABLE = r'^g_([a-z][A-Z0-9]*)*$'
    # lower_snake_case header file names ending in .h.
    FILE_NAME = r'^[a-z]+[a-z0-9]+([\_][a-z0-9]+)*\.h$'
    # Functions: an OH/OS prefix with _UpperCamel segments, or plain UpperCamelCase.
    # Fixed: the original pattern used the character class [OH|OS] (which matches
    # single characters O, H, |, S) instead of the alternation (OH|OS), and left
    # the first alternative unanchored at the end, so re.match accepted any name
    # merely starting with O, H or S.
    CHECK_FUNCTION_NAME = r'^((OH|OS)(_([A-Z]+[a-z0-9]*)+)*|([A-Z][a-z0-9]*)*)$'
# Maps a libclang cursor-kind name to the naming check applied to such nodes.
# Kinds absent from this table are not name-checked (see check_ndk_name).
process_tag_function = {
    'FUNCTION_DECL': check_function_name,
    'STRUCT_DECL': check_large_hump,
    'ENUM_DECL': check_large_hump,
    'UNION_DECL': check_large_hump,
    'VAR_DECL': check_small_hump,
    'PARM_DECL': check_small_hump,
    'FIELD_DECL': check_small_hump,
    'MACRO_DEFINITION': check_all_uppercase_hump,
    'ENUM_CONSTANT_DECL': check_all_uppercase_hump,
}
def check_ndk_name(api_info) -> list[ApiResultInfo]:
    """Dispatch the naming check registered for this node's cursor kind.

    Nodes whose kind has no registered check yield an empty list.
    """
    # Single lookup via dict.get replaces the original `kind in d.keys()`
    # membership test followed by a second subscript lookup.
    name_process = process_tag_function.get(api_info['kind'])
    if name_process is None:
        return []
    return name_process(api_info)

File diff suppressed because it is too large Load Diff

View File

@ -1,29 +1,27 @@
import json
import pandas as pd # 用于生成表格
import os
import pandas as pd # 用于生成表格
def compare_json_file(js_file1, js_file2): # 获取对比结果
def compare_json_file(js_file1, js_file2): # 获取对比结果
with open(js_file1, 'r', encoding='utf-8') as js1:
data1 = json.load(js1)
with open(js_file2, 'r') as js2:
data2 = json.load(js2)
compare_result = []
only_file1 = [] # 装file1独有的
only_file1 = [] # 装file1独有的
result_api = filter_compare(data1)
for i in range(len(result_api)):
name1 = result_api[i]["name"]
for it in result_api:
name1 = it["name"]
key = 0
for item in data2:
if item["name"]:
name2 = item["name"]
if name1 == name2:
key = 1
compare_result.append(result_api[i])
break
if item["name"] == name1:
key = 1
compare_result.append(it)
break
if key == 0:
only_file1.append(result_api[i])
only_file2 = get_difference_data(compare_result, data2) # 获取file2独有的
only_file1.append(it)
only_file2 = get_difference_data(compare_result, data2) # 获取file2独有的
return compare_result, only_file1, only_file2
@ -33,8 +31,8 @@ def get_difference_data(compare_result, data2):
for item in data2:
name2 = item["name"]
key = 0
for j in range(len(compare_result)):
name1 = compare_result[j]["name"]
for it in compare_result:
name1 = it["name"]
if name2 == name1:
key = 1
break
@ -43,43 +41,50 @@ def get_difference_data(compare_result, data2):
return only_file2
def filter_compare(data1): # 获取函数和变量
def filter_compare(data1): # 获取函数和变量
result_api = []
for i in range(len(data1)):
for item1 in data1[i]["children"]: # 抛开根节点
for it in data1:
for item1 in it["children"]: # 抛开根节点
if (item1["kind"] == 'FUNCTION_DECL' or item1["kind"] == 'VAR_DECL') and item1["is_extern"]:
item = filter_func(item1)
result_api.append(item)
return result_api
def get_parm(item, parm):
    """Collect 'type name' strings for PARM_DECL entries and store them back.

    NOTE(review): assumes item["parm"] is rewritten only when the original
    parameter list is non-empty — confirm against callers (filter_func).
    """
    if item["parm"]:
        # Direct iteration replaces the original range(len(...)) indexing
        # with its inverted continue/else branch.
        for parm_node in item["parm"]:
            if parm_node["kind"] == 'PARM_DECL':
                parm.append(parm_node["type"] + ' ' + parm_node["name"])
        item["parm"] = parm
def filter_func(item):
del item["is_extern"] # 剔除is_extern键值对过滤后都是extern
del item["is_extern"] # 剔除is_extern键值对过滤后都是extern
del item["comment"]
item["location_path"] = item["location"]["location_path"]
item["location"] = item["location"]["location_line"]
if item["kind"] == 'FUNCTION_DECL':
item["kind"] = '函数类型'
parm = [] # 装函数参数
parm = [] # 装函数参数
if "parm" in item:
if item["parm"]:
for i in range(len(item["parm"])):
if item["parm"][i]["kind"] != 'PARM_DECL':
continue
else:
str_parm = item["parm"][i]["type"] + ' ' + item["parm"][i]["name"]
parm.append(str_parm)
item["parm"] = parm
get_parm(item, parm)
else:
item["kind"] = '变量类型'
return item
def generate_excel(array, name, only_file1, only_file2):
pf = pd.DataFrame.from_dict(array, orient='columns') # 将列表转成DataFrame并且按列的方式读取数据(orient='columns')
# 将列表转成DataFrame并且按列的方式读取数据(orient='columns')
pf = pd.DataFrame.from_dict(array, orient='columns')
pf1 = pd.DataFrame(only_file1)
pf2 = pd.DataFrame(only_file2)
columns_map = { # 将列名换为中文名
# 将列名换为中文名
columns_map = {
'name': '名称',
'kind': '节点类型',
'type': '类型',
@ -89,8 +94,9 @@ def generate_excel(array, name, only_file1, only_file2):
'return_type': '返回类型',
'parm': '参数'
}
pf.rename(columns=columns_map, inplace=True)
with pd.ExcelWriter(name) as writer: # 生成该表格
with pd.ExcelWriter(name) as writer: # 生成该表格
pf.to_excel(writer, sheet_name='对比结果', index=False)
pf1.to_excel(writer, sheet_name='生成json独有', index=False)
pf2.to_excel(writer, sheet_name='原json独有', index=False)
@ -102,15 +108,16 @@ def increase_sheet(array, name, sheet):
pf.to_excel(writer, sheet_name=sheet, index=False)
def get_json_file(json_file_new, json_file): # 获取生成的json文件
json_file1 = r'{}'.format(json_file_new) # 获取要对比的json文件
def get_json_file(json_file_new, json_file): # 获取生成的json文件
json_file1 = r'{}'.format(json_file_new) # 获取要对比的json文件
json_file2 = json_file
head_name = os.path.splitext(json_file1) # 去掉文件名后缀
head_name = head_name[0] + '.xlsx' # 加后缀
head_name = os.path.splitext(json_file1) # 去掉文件名后缀
head_name = head_name[0] + '.xlsx' # 加后缀
result_list = []
only_file1 = []
only_file2 = []
for i in range(len(json_file2)): # 对比每一个json(目录下的)
result_list, only_file1, only_file2 = compare_json_file(json_file1, json_file2[i]) # 对比两个json文件
for item in json_file2: # 对比每一个json(目录下的)
# 对比两个json文件
result_list, only_file1, only_file2 = compare_json_file(json_file1, item)
return result_list, head_name, only_file1, only_file2 # 返回对比数据,和所需表格名
return result_list, head_name, only_file1, only_file2 # 返回对比数据,和所需表格名

View File

@ -1,26 +1,26 @@
import re
import clang.cindex
from clang.cindex import Config # 配置
from clang.cindex import Index # 主要API
from clang.cindex import CursorKind # 索引结点的类别
from clang.cindex import TypeKind # 节点的语义类别
import os
import clang.cindex
from clang.cindex import Config
from clang.cindex import Index
from clang.cindex import CursorKind
from clang.cindex import TypeKind
from utils.constants import StringConstant
def find_parent(cursor): # 获取父节点
def find_parent(cursor): # 获取父节点
cursor_parent = cursor.semantic_parent
if cursor_parent is not None:
if cursor_parent.kind == CursorKind.VAR_DECL: # 父节点为VAR_DECL 用于整型变量节点
return cursor_parent.kind
elif cursor_parent.kind == CursorKind.STRUCT_DECL or cursor_parent.kind == CursorKind.UNION_DECL: # 用于判断里面成员属于那类
# 用于判断里面成员属于那类
elif cursor_parent.kind == CursorKind.STRUCT_DECL or cursor_parent.kind == CursorKind.UNION_DECL:
return cursor_parent.kind
else:
parent = cursor_parent.semantic_parent
if parent is not None:
return parent.kind
else:
return None
def processing_no_child(cursor, data): # 处理没有子节点的节点
@ -41,34 +41,41 @@ def processing_no_child(cursor, data): # 处理没有子节点的节点
data["integer_value"] = token.spelling # 获取整型变量值
def get_complex_def(tokens_new, count_token, tokens, data):
    """Detect a function-like (composite) macro and record its full name.

    Scans tokens_new up to the first ')'; when one is found before the token
    list ends, the macro name including its parameter parentheses is joined
    into data["name"]. Returns (logo_com, count_com): a found-flag and the
    token index just past the name.
    """
    count = 1
    logo = 0
    logo_com = 0
    count_com = 0
    for token_2 in tokens_new:
        if token_2.spelling == ')':
            logo = 1
            break
        else:
            count += 1
    if count_token == count:
        pass
    elif logo == 1:  # capture the name of the function-like macro definition
        logo_com = logo
        count_com = count + 1
        tokens_name = tokens[:count + 1]
        data["name"] = ''.join([token.spelling for token in tokens_name])
    return logo_com, count_com
def processing_complex_def(tokens, data): # 处理复合型宏
tokens_new = tokens[1:] # 跳过正常宏名
logo_com = 0 # 记录复合型,复合型文本也得根据这个
count_com = 0
count_token = len(tokens_new) # value ()
count_token = len(tokens_new) # value ()
for token in tokens_new:
if token.kind.name == 'KEYWORD':
break
if token.kind.name == 'IDENTIFIER':
count = 1
logo = 0
for token_2 in tokens_new:
if token_2.spelling == ')':
logo = 1
break
else:
count += 1
if count_token == count:
pass
elif logo == 1: # 获取复合型宏定义宏名
logo_com = logo
count_com = count + 1
tokens_name = tokens[:count + 1]
data["name"] = ''.join([token.spelling for token in tokens_name])
logo_com, count_com = get_complex_def(tokens_new, count_token, tokens, data)
get_def_text(tokens, data, logo_com, count_com) # 获取宏文本
def get_def_text(tokens, data, logo_compose, count_compose): # 获取宏文本
def get_def_text(tokens, data, logo_compose, count_compose): # 获取宏文本
if logo_compose == 1:
marco_expansion = ''.join([token.spelling for token in tokens[count_compose:]]) # 获取宏文本,有就记录,没有不管
if marco_expansion:
@ -76,7 +83,7 @@ def get_def_text(tokens, data, logo_compose, count_compose):
else:
pass
else:
marco_expansion = ''.join([token.spelling for token in tokens[1:]]) # 获取宏文本,有就记录,没有不管
marco_expansion = ''.join([token.spelling for token in tokens[1:]]) # 获取宏文本,有就记录,没有不管
if marco_expansion:
data["text"] = marco_expansion
else:
@ -91,13 +98,11 @@ def get_token(cursor):
return tokens
def judgment_extern(cursor, data): # 判断是否带有extern
is_extern = None
def judgment_extern(cursor, data): # 判断是否带有extern
tokens = get_token(cursor)
if cursor.kind == CursorKind.FUNCTION_DECL:
if 'static' in tokens:
is_extern = False
# elif 'deprecated' in tokens and ('attribute' in tokens or '__declspec' in tokens):
elif 'deprecated' in tokens:
is_extern = False
else:
@ -109,13 +114,11 @@ def judgment_extern(cursor, data):
is_extern = False
else:
is_extern = True
if is_extern:
data["is_extern"] = is_extern
else:
data["is_extern"] = is_extern
data["is_extern"] = is_extern
def binary_operator(cursor, data): # 二元操作符处理
def binary_operator(cursor, data): # 二元操作符处理
data["name"] = "binary_ope_no_spelling"
tokens = cursor.get_tokens()
spelling_arr = ['<<', '>>', '+', '-', '*', '/']
@ -124,29 +127,29 @@ def binary_operator(cursor, data):
data["operator"] = token.spelling
def distinction_member(cursor, data): # 区别结构体和联合体成员
parent_kind = find_parent(cursor) # 查找父节点类型
def distinction_member(cursor, data): # 区别结构体和联合体成员
parent_kind = find_parent(cursor) # 查找父节点类型
if parent_kind == CursorKind.UNION_DECL:
data["member"] = "union_member"
elif parent_kind == CursorKind.STRUCT_DECL:
data["member"] = "struct_member"
def processing_parm(cursor, data): # 函数参数节点处理
if cursor.spelling: # 函数参数是否带参数名
def processing_parm(cursor, data): # 函数参数节点处理
if cursor.spelling: # 函数参数是否带参数名
data["name"] = cursor.spelling
else:
data["name"] = "arg_no_spelling"
if cursor.type.get_pointee().kind == TypeKind.FUNCTIONPROTO: # 参数为函数指针,获取对应的返回类型
if cursor.type.get_pointee().kind == TypeKind.FUNCTIONPROTO: # 参数为函数指针,获取对应的返回类型
data["func_pointer_result_type"] = cursor.type.get_pointee().get_result().spelling
def processing_enum(cursor, data): # 获取枚举值
def processing_enum(cursor, data): # 获取枚举值
data["value"] = cursor.enum_value
def processing_def(cursor, data): # 处理宏定义
def processing_def(cursor, data): # 处理宏定义
marco_ext = cursor.extent
tokens = cursor.translation_unit.get_tokens(extent=marco_ext) # 找到对应的宏定义位置
tokens = list(tokens) # Generator转为list
@ -154,21 +157,20 @@ def processing_def(cursor, data):
data["type"] = "def_no_type"
def processing_func(cursor, data): # 处理函数
def processing_func(cursor, data): # 处理函数
data["return_type"] = cursor.result_type.spelling # 增加返回类型键值对
judgment_extern(cursor, data)
def processing_type(cursor, data): # 没有类型的节点处理
def processing_type(cursor, data): # 没有类型的节点处理
if cursor.kind == CursorKind.MACRO_INSTANTIATION: # 也属于宏定义 --宏引用
data["type"] = "insta_no_type"
elif cursor.kind == CursorKind.INCLUSION_DIRECTIVE: # 头文件也没type规定
data["type"] = "inclusion_no_type"
return
def processing_name(cursor, data): # 没有名的节点处理
def processing_name(cursor, data): # 没有名的节点处理
if cursor.kind == CursorKind.PAREN_EXPR: # 括号表达式()
data["paren"] = "()"
data["name"] = "paren_expr_no_spelling"
@ -177,7 +179,7 @@ def processing_name(cursor, data):
data["name"] = "unexposed_expr_no_spelling"
def processing_char(cursor, data): # 字符节点处理
def processing_char(cursor, data): # 字符节点处理
tokens = list(cursor.get_tokens())
char_value = (tokens[0].spelling.strip("'"))
data["name"] = char_value
@ -199,22 +201,24 @@ special_node_process = {
}
def processing_special_node(cursor, data, gn_path=None): # 处理需要特殊处理的节点
def processing_special_node(cursor, data, gn_path=None): # 处理需要特殊处理的节点
loc = {
"location_path": '{}'.format(cursor.location.file.name),
"location_line": cursor.location.line,
"location_column": cursor.location.column
}
relative_path = os.path.relpath(cursor.location.file.name, gn_path) # 获取头文件相对路
loc["location_path"] = relative_path
if gn_path:
relative_path = os.path.relpath(cursor.location.file.name, gn_path) # 获取头文件相对路
loc["location_path"] = relative_path
data["location"] = loc
if cursor.kind.name in special_node_process.keys():
node_process = special_node_process[cursor.kind.name]
node_process(cursor, data) # 调用对应节点处理函数
node_process(cursor, data) # 调用对应节点处理函数
def ast_to_dict(cursor, current_file, gn_path=None, comment=None): # 解析数据的整理
data = { # 通用
# 通用
data = {
"name": cursor.spelling,
"kind": cursor.kind.name,
"type": cursor.type.spelling,
@ -237,23 +241,30 @@ def ast_to_dict(cursor, current_file, gn_path=None, comment=None): # 解析数
if len(children) > 0:
if cursor.kind == CursorKind.FUNCTION_DECL: # 函数参数
name = "parm"
elif cursor.kind == CursorKind.ENUM_DECL or cursor.kind == CursorKind.STRUCT_DECL or cursor.kind == CursorKind.UNION_DECL:
elif (cursor.kind == CursorKind.ENUM_DECL
or cursor.kind == CursorKind.STRUCT_DECL
or cursor.kind == CursorKind.UNION_DECL):
name = "members"
else:
name = "children"
data[name] = []
for child in children:
if child.location.file is not None and child.kind != CursorKind.UNEXPOSED_ATTR: # 剔除多余宏定义和跳过UNEXPOSED_ATTR节点
if child.location.file.name == current_file:
child_data = ast_to_dict(child, current_file, gn_path)
data[name].append(child_data)
else:
pass
processing_ast_node(children, current_file, data, name, gn_path)
else:
processing_no_child(cursor, data) # 处理没有子节点的节点
return data
def processing_ast_node(children, current_file, data, name, gn_path):
    """Convert each relevant child cursor to a dict and append it to data[name]."""
    for child in children:
        # Drop redundant macro definitions and skip UNEXPOSED_ATTR nodes.
        if child.location.file is not None and child.kind != CursorKind.UNEXPOSED_ATTR:
            # Only keep children declared in the file currently being parsed.
            if child.location.file.name == current_file:
                child_data = ast_to_dict(child, current_file, gn_path)
                data[name].append(child_data)
            else:
                pass
def preorder_travers_ast(cursor, total, comment, current_file, gn_path=None): # 获取属性
ast_dict = ast_to_dict(cursor, current_file, gn_path, comment) # 获取节点属性
total.append(ast_dict) # 追加到数据统计列表里面
@ -272,14 +283,13 @@ def get_start_comments(include_path): # 获取每个头文件的最开始注释
return matches
def api_entrance(share_lib, include_path, gn_path, link_path=None): # 统计入口
def api_entrance(share_lib, include_path, gn_path=None, link_path=None): # 统计入口
# clang.cindex需要用到libclang.dll共享库 所以配置共享库
if Config.loaded:
print("config.loaded == true")
else:
Config.set_library_file(share_lib)
print("lib.dll: install path")
# 创建AST索引
index = Index.create()
print('=' * 50)
@ -290,26 +300,26 @@ def api_entrance(share_lib, include_path, gn_path, link_path=None): # 统计入
print(args)
data_total = [] # 列表对象-用于统计
for i in range(len(include_path)): # 对每个头文件做处理
tu = index.parse(include_path[i], args=args, options=options)
for item in include_path: # 对每个头文件做处理
tu = index.parse(item, args=args, options=options)
print(tu)
print('=' * 50)
ast_root_node = tu.cursor # 获取根节点
print(ast_root_node)
matches = get_start_comments(include_path[i]) # 接收文件最开始的注释
matches = get_start_comments(item) # 接收文件最开始的注释
# 前序遍历AST
preorder_travers_ast(ast_root_node, data_total, matches, include_path[i], gn_path) # 调用处理函数
preorder_travers_ast(ast_root_node, data_total, matches, item, gn_path) # 调用处理函数
print('=' * 50)
return data_total
def get_include_file(libclang, include_file_path, link_path, gn_path=None): # 库路径、.h文件路径、链接头文件路径
def get_include_file(include_file_path, link_path, gn_path=None): # 库路径、.h文件路径、链接头文件路径
# libclang.dll库路径
libclang_path = libclang
libclang_path = StringConstant.LIB_CLG_PATH.value
# c头文件的路径
file_path = include_file_path
print(file_path)
# 头文件链接路径
link_include_path = link_path # 可以通过列表传入
data = api_entrance(libclang_path, file_path, gn_path, link_include_path) # 调用接口

View File

@ -1,212 +1,237 @@
import os # 可用于操作目录文件
import glob # 可用于查找指定目录下指定后缀的文件
import re # 正则表达是模块--可用于操作文件里面的内容
import shutil # 拷贝文件
from coreImpl.parser import parse_include, generating_tables # 引入解析文件 # 引入得到结果表格文件
import json
import os
import glob
import re
import shutil
from utils.constants import StringConstant
from coreImpl.parser import parse_include, generating_tables # 引入解析文件 # 引入得到结果表格文件
def find_gn_file(directory): # 找指定目录下所有GN文件
def find_gn_file(directory): # 找指定目录下所有GN文件
gn_files = []
for root, dirs, files in os.walk(directory): # dirpath, dirnames, filenames(对应信息)
for root, _, files in os.walk(directory): # dirpath, dirnames, filenames(对应信息)
for file in files:
if file.endswith(".gn"):
gn_files.append(os.path.join(root, file))
return gn_files
def find_function_file(file, function_name): # 在GN文件中查找指定函数并在有函数名获取对应sources的值
with open(file, 'r') as f:
content = f.read() # 获取文件内容
pattern = r'\b' + re.escape(function_name) + r'\b' # '\b'确保函数名的完全匹配
matches = re.findall(pattern, content)
f.seek(0) # 回到文件开始位置
if len(matches): # 是否匹配成功
sources = [] # 转全部匹配的sources的.h(可能不止一个-headers函数)
f.seek(0)
end = 0 # 记录光标
for i in range(len(matches)):
# 匹配sources = \[[^\]]*\](匹配方括号内的内容,其中包括一个或多个非右括号字符),\s*匹配0个或多个空白字符
pattern = r'sources\s*=\s*\[[^\]]*\]'
sources_match = re.search(pattern, content)
if sources_match:
sources_value = sources_match.group(0) # 获取完整匹配的字符串
sources_value = re.sub(r'\s', '', sources_value) # 去除源字符串的空白字符(换行符)和空格
pattern = r'"([^"]+h)"' # 匹配引号中的内容,找对应的.h
source = re.findall(pattern, sources_value)
sources.extend(source)
end += sources_match.end() # 每次找完一个sources的.h路径记录光标结束位置
f.seek(end) # 移动光标在该结束位置
content = f.read() # 从当前位置读取问价内容,防止重复
return len(matches) > 0, sources
else:
return None, None # gn文件没有对应的函数
def find_h_file(matches, f, sources):
    """Extend sources with the .h paths listed in each sources=[...] block.

    For every regex match position, seeks the open GN file f to that offset
    and searches the remaining text for the next sources assignment.
    """
    for mat in matches:
        # sources\s*=\s*\[[^\]]*\] matches the bracketed list contents;
        # \s* allows any whitespace around '='.
        f.seek(mat.span()[0])
        content = f.read()
        pattern = r'sources\s*=\s*\[[^\]]*\]'
        sources_match = re.search(pattern, content)
        if sources_match:
            sources_value = sources_match.group(0)  # full matched string
            sources_value = re.sub(r'\s', '', sources_value)  # strip whitespace/newlines
            pattern = r'"([^"]+h)"'  # quoted entries ending in 'h' (header paths)
            source = re.findall(pattern, sources_value)
            sources.extend(source)
def get_dest_dir(file, function_name): # 获取dest_dir
def find_function_file(file, function_name): # 在GN文件中查找指定函数并在有函数名获取对应sources的值
with open(file, 'r') as f:
content = f.read() # 获取文件内容
pattern = r'\b' + re.escape(function_name) + r'\b' # '\b'确保函数名的完全匹配
content = f.read() # 获取文件内容
pattern = r'\b' + re.escape(function_name) + r'\b' # '\b'确保函数名的完全匹配
matches = re.finditer(pattern, content) # finditer会返回位置信息
f.seek(0) # 回到文件开始位置
sources = [] # 装全部匹配的sources的.h(可能不止一个-headers函数)
if matches: # 是否匹配成功
find_h_file(matches, f, sources)
print("where", sources)
return matches, sources
def find_dest_dir(matches, content, f):
    """Collect every dest_dir = "..." value, one search per function match.

    Advances a cursor through the open file f after each hit so repeated
    assignments are found in order. Returns the list of dest_dir strings.
    """
    sources_dir = []
    if matches:
        end = 0
        for _ in matches:
            pattern = r'dest_dir\s*=\s*"([^"]*)"'
            source_match = re.search(pattern, content)
            if source_match:
                con = source_match.group(1)
                sources_dir.append(con)
                end += source_match.end()  # record the cursor past this hit
                f.seek(end)  # move the file cursor to that position
                content = f.read()
    return sources_dir
def get_dest_dir(file, function_name): # 获取dest_dir
with open(file, 'r') as f:
content = f.read() # 获取文件内容
pattern = r'\b' + re.escape(function_name) + r'\b' # '\b'确保函数名的完全匹配
matches = re.findall(pattern, content)
f.seek(0)
if matches:
sources_dir = []
f.seek(0)
end = 0
for i in range(len(matches)):
pattern = r'dest_dir\s*=\s*"([^"]*)"'
source_match = re.search(pattern, content)
if source_match:
con = source_match.group(1)
con_real = con[1:]
sources_dir.append(con)
end += source_match.end() # 每次找完一个sources的.h路径记录光标结束位置
f.seek(end) # 移动光标在该结束位置
content = f.read()
return sources_dir
else:
return None
sources_dir = find_dest_dir(matches, content, f)
return sources_dir
def find_json_file(gn_file_match): # 找gn文件同级目录下的.json文件
def find_json_file(gn_file_match): # 找gn文件同级目录下的.json文件
match_json_file = []
directory = os.path.dirname(gn_file_match)
for file in glob.glob(os.path.join(directory, "*.json")): # 统计.json文件
for file in glob.glob(os.path.join(directory, "*.json")): # 统计.json文件
match_json_file.append(file)
return match_json_file
# def get_
def dire_func(gn_file, func_name): # 统计数据的
matches_file_total = [] # 统计有ohos_ndk_headers函数的gn文件
json_file_total = [] # 统计跟含有函数的gn文件同级的json文件
source_include = [] # 统计sources里面的.h
length, source = find_function_file(gn_file, func_name) # 找到包含函数的gn文件和同级目录下的.json文件
if length: # 保证两个都不为空source可能为空
source_include = source # 获取头文件列表
matches_file_total.append(gn_file) # 调用匹配函数的函数(说明有对应的函数、source)
json_file_total.extend(find_json_file(gn_file)) # 找json
def dire_func(gn_file, func_name): # 统计数据的
matches_file_total = [] # 统计有ohos_ndk_headers函数的gn文件
json_file_total = [] # 统计跟含有函数的gn文件同级的json文件
source_include = [] # 统计sources里面的.h
matches, source = find_function_file(gn_file, func_name) # 找到包含函数的gn文件
if matches: # 保证两个都不为空source可能为空
source_include = source # 获取头文件列表
matches_file_total.append(gn_file) # 调用匹配函数的函数(说明有对应的函数、source)
json_file_total.extend(find_json_file(gn_file)) # 同级目录下的.json文件
return matches_file_total, json_file_total, source_include
def change_json_file(dict_data, name): # 生成json文件
file_name = name + '_new' + '.json' # json文件名
with open(file_name, 'w', encoding='UTF-8') as f: # encoding='UTF-8'能显示中文
def change_json_file(dict_data, name): # 生成json文件
file_name = name + '_new' + '.json' # json文件名
with open(file_name, 'w', encoding='UTF-8') as f: # encoding='UTF-8'能显示中文
# ensure_ascii=False确保能显示中文indent=4(格式控制)使生成的json样式跟字典一样
json.dump(dict_data, f, ensure_ascii=False, indent=4)
return file_name
def change_abs(include_files, dire_path):
    """Convert the header paths from a GN file into absolute paths.

    Args:
        include_files: header paths as written in the GN file (may be
            GN-absolute like //interface/sdk_c/... or relative).
        dire_path: directory of the GN file, used to resolve relative entries.

    Returns:
        List of absolute header paths.
    """
    abs_path = []
    for j_item in include_files:
        if os.path.isabs(j_item):  # GN-absolute path: remap into the local checkout
            head = os.path.splitdrive(dire_path)  # Windows drive of the checkout
            include_file = os.path.normpath(j_item)
            if 'third_party/node/src' in j_item:
                # third-party node headers live under the local warehouse root
                include_file = include_file.replace('\\\\', StringConstant.REPLACE_WAREHOUSE.value + '\\')
            else:
                # drop the leading '\\' and map interface/sdk_c onto the local clone
                include_file = include_file.replace('\\\\interface\\sdk_c',
                                                    StringConstant.REPLACE_WAREHOUSE.value)
            if head:
                include_file = os.path.join(head[0], include_file)  # re-attach drive
            abs_path.append(include_file)
        else:
            # relative entry: resolve against the GN file's directory (handles ../ and .)
            relative_path = os.path.abspath(os.path.join(dire_path, os.path.normpath(j_item)))
            abs_path.append(relative_path)
    print("头文件绝对路径:\n", abs_path)
    print("=" * 50)
    return abs_path
def get_result_table(json_files, abs_path, link_path, gn_path):
    """Parse headers, diff against the declared .json, and build table rows.

    Args:
        json_files: .json files found next to the GN file (first one is used).
        abs_path: absolute header paths to parse.
        link_path: extra include directories for the parser.
        gn_path: directory of the GN file being processed.

    Returns:
        Tuple (result rows, table name, entries only in the parsed json,
        entries only in the declared json); all empty / "" when json_files
        is empty.
    """
    result_list = []
    head_name = ""
    only_file1 = []
    only_file2 = []
    if json_files:
        file_name = os.path.split(json_files[0])      # use the first json file's name
        file_name = os.path.splitext(file_name[1])    # (stem, ext) of that file
        data = parse_include.get_include_file(abs_path, link_path, gn_path)  # parse headers
        parse_json_name = change_json_file(data, file_name[0])  # dump parsed data to json
        # compare the generated json with the declared one and build the table rows
        result_list, head_name, only_file1, only_file2 = generating_tables.get_json_file(parse_json_name,
                                                                                         json_files)
    return result_list, head_name, only_file1, only_file2
def create_dir(sources_dir, gn_file, function_name, link_include_file):
    """Mirror a GN target's headers into the local sysroot.

    For every destination directory declared by the GN file, create the
    matching 'sysroot/<dir>' directory, register it as an include path,
    and copy the target's headers into it.

    Args:
        sources_dir: destination directories parsed from the GN file.
        gn_file: path of the GN file being processed.
        function_name: GN template name to match (e.g. "ohos_ndk_headers").
        link_include_file: accumulator list of include directories (mutated).
    """
    if sources_dir:
        for item in sources_dir:
            directory = item
            new_dire = os.path.join('sysroot', directory)
            new_dire = os.path.normpath(new_dire)
            if not os.path.exists(new_dire):
                os.makedirs(new_dire)
            else:
                print("目录已存在")
            if new_dire in link_include_file:
                pass  # already registered as an include directory
            else:
                link_include_file.append(new_dire)
            match_files, json_files, include_files = dire_func(gn_file, function_name)
            dire_path = os.path.dirname(gn_file)  # directory of the GN file
            if match_files:
                abs_path = change_abs(include_files, dire_path)  # absolute header paths
                for j_item in abs_path:
                    shutil.copy(j_item, new_dire)
            else:
                print("在create_dir函数中原因gn文件条件不满足")
    else:
        print("gn文件没有ohos_sdk_headers")
def link_include(directory_path, function_names, link_include_file):
    """Scan a directory tree for GN files and mirror their headers.

    Args:
        directory_path: root directory to search for GN files.
        function_names: GN template name to match.
        link_include_file: accumulator list of include directories (mutated).
    """
    gn_file_total = find_gn_file(directory_path)  # every GN file in the tree
    for item in gn_file_total:
        sources_dir = get_dest_dir(item, function_names)  # declared dest dirs
        if sources_dir:
            create_dir(sources_dir, item, function_names, link_include_file)
def main_entrance(directory_path, function_names, link_path):
    """Process every GN file in a tree and emit one aggregated result table.

    Args:
        directory_path: root directory to search for GN files.
        function_names: GN template name to match.
        link_path: include directories for the header parser.
    """
    gn_file_total = find_gn_file(directory_path)
    result_list_total = []
    only_file1_total = []
    only_file2_total = []
    for item in gn_file_total:
        match_files, json_files, include_files = dire_func(item, function_names)
        dire_path = os.path.dirname(item)  # directory of the GN file
        print("目录路径: {}".format(dire_path))
        print("同级json文件\n", json_files)
        print("头文件:\n", include_files)
        if include_files:  # GN file declares headers
            abs_path = change_abs(include_files, dire_path)  # absolute header paths
            print("头文件绝对路径:\n", abs_path)
            result_list, head_name, only_file1, only_file2 = get_result_table(json_files, abs_path,
                                                                              link_path, dire_path)
            if len(result_list) != 0:
                result_list_total.extend(result_list)
                only_file1_total.extend(only_file1)
                only_file2_total.extend(only_file2)
            elif head_name == "":  # get_result_table found no sibling json
                print("gn文件下无json文件")
            else:
                print("没有匹配项")
        else:
            print("gn文件无header函数")
    head_name = "result_total.xlsx"  # single aggregated output workbook
    generating_tables.generate_excel(result_list_total, head_name, only_file1_total, only_file2_total)
def copy_std_lib(link_include_file):
    """Copy the musl NDK headers into sysroot and register the directory.

    Args:
        link_include_file: accumulator list of include directories (mutated).
    """
    std_include = r'sysroot\ndk_musl_include_files'
    if not os.path.exists(std_include):  # copy only once
        shutil.copytree(StringConstant.INCLUDE_LIB.value, std_include)
    link_include_file.append(std_include)
def find_include(link_include_path):
    """Append every directory under the NDK headers output dir to the list.

    Args:
        link_include_path: accumulator list of include directories (mutated).
    """
    # walk the mirrored header tree; only directory paths are needed
    for dir_path, _, _ in os.walk('sysroot\\$ndk_headers_out_dir'):
        link_include_path.append(dir_path)
def parser(directory_path):
    """Tool entry point: build sysroot includes and run the full check.

    Args:
        directory_path: root directory containing the GN files to process.
    """
    function_name = StringConstant.FUNK_NAME.value  # GN template to match
    link_include_path = []  # include directories collected for the parser
    copy_std_lib(link_include_path)   # mirror the musl NDK headers into sysroot
    find_include(link_include_path)   # pick up already-mirrored header dirs
    link_include(directory_path, function_name, link_include_path)
    main_entrance(directory_path, function_name, link_include_path)
def parser_include_ast(gn_file_path, include_path):
    """Parse the given headers with the musl NDK include dir linked in.

    Args:
        gn_file_path: directory of the GN file the headers belong to.
        include_path: header files to parse.

    Returns:
        The parsed include data from parse_include.
    """
    return parse_include.get_include_file(
        include_path, [StringConstant.INCLUDE_LIB.value], gn_file_path)

View File

@ -1,21 +1,22 @@
import argparse

from bin import config


def main_function():
    """Build the CLI from the tool configuration and dispatch the chosen tool."""
    parser = argparse.ArgumentParser(
        prog=config.Config.name, description=config.Config.description)
    # each configured command contributes one CLI argument
    for command in config.Config.commands:
        arg_abbr = command.get("abbr")
        arg_name = command.get("name")
        arg_choices = command.get("choices")
        arg_required = bool(command.get("required"))
        arg_type = command.get("type")
        default = command.get("default")
        arg_help = command.get("help")
        parser.add_argument(arg_abbr, arg_name, choices=arg_choices,
                            required=arg_required, type=arg_type,
                            default=default, help=arg_help)
    config.run_tools(parser.parse_args())


main_function()

View File

@ -0,0 +1,269 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2023 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class TAGS(enum.Enum):
    """Doc-comment tag names the API doc checker recognizes.

    Each member's value is the tag text as it appears after '@' in a
    header's doc comment (plus the brace tokens used for grouping).
    """
    ADD_TO_GROUP = 'addtogroup'
    BRIEF = 'brief'
    DEPRECATED = 'deprecated'
    FILE = 'file'
    LIBRARY = 'library'
    PARAM = 'param'
    PERMISSION = 'permission'
    RETURN = 'return'
    SINCE = 'since'
    SYSCAP = 'syscap'
    LEFT_BRACE = '{'
    RIGHT_BRACE = '}'
class ErrorType(enum.Enum):
    """Categories of findings reported by the checker.

    Each member's value is a dict with a numeric 'id' and a short
    'description'. Note the ids are sparse (3, 4 and 6 are unused here).
    """
    # sentinel used before a finding is classified
    DEFAULT = {
        'id': -1,
        'description': '',
    }
    EMPTY_TAG = {
        'id': 0,
        'description': "空标签",
    }
    MISSPELL_WORDS = {
        'id': 1,
        'description': 'misspell words',
    }
    NAMING_ERRORS = {
        'id': 2,
        'description': 'naming errors',
    }
    UNKNOW_DEPRECATED = {
        'id': 5,
        'description': 'unknow deprecated',
    }
    WRONG_VALUE = {
        'id': 7,
        'description': 'wrong value',
    }
    WRONG_SCENE = {
        'id': 8,
        'description': 'wrong scene',
    }
class LogType(enum.Enum):
    """Which layer of the check produced a finding (API, JsDoc or file)."""
    DEFAULT = ''
    LOG_API = 'Api'
    LOG_JSDOC = 'JsDoc'
    LOG_FILE = 'File'
class ErrorLevel(enum.Enum):
    """Severity of a finding; higher value means more severe."""
    HIGH = 3
    MIDDLE = 2
    LOW = 1
class ErrorMessage(enum.Enum):
    """Template text for every diagnostic the checker can emit.

    '$$' is a placeholder substituted with the offending tag/name at
    report time. Members named after cursor kinds (FUNCTION_DECL,
    STRUCT_DECL, ...) are the naming-style diagnostics for that kind.
    """
    EMPTY_TAG = 'the [$$] tag value is empty. Please supplement the default value'
    REPEAT_FILE_TAG = 'the [$$] tag is repeat. Please check the tag in file'
    ERROR_INFO_VALUE_TAG = 'the [$$] tag value is incorrect. Please check the usage method'
    ERROR_INFO_VALUE_LIBRARY = 'the [library] tag value is incorrect. This tag must be end with .so or .a, or is NA. Please check the usage method'
    ERROR_INFO_VALUE_PARAM = 'the value of the [$$] [param] tag is incorrect. Please check if it matches the [$$] parameter name'
    ERROR_INFO_COUNT_PARAM = 'the count of the [param] tag is wrong. Please check the parameters and Doc'
    ERROR_INFO_VALUE_PERMISSION = 'the [permission] tag value is incorrect. Please check if the permission field has been configured or update the configuration file'
    ERROR_INFO_VALUE_SINCE = 'the [since] tag value is incorrect. Please check if the tag value is a numerical value'
    ERROR_INFO_VALUE_SYSCAP = 'the [syscap] tag value is incorrect. Please check if the syscap field is configured'
    ERROR_USE_LEFT_BRACE = 'the validity verification of the Doc tag failed. The [{] tag is not allowed to used in Doc which not has addtogroup tag, or used in the wrong place.'
    ERROR_REPEAT_LEFT_BRACE = 'the validity verification of the Doc tag failed. The [{] tag is not allowed to reuse in Doc which has addtogroup tag.'
    ERROR_USE_RIGHT_BRACE = 'the validity verification of the JSDoc tag failed. The [}] tag is not allowed to be reused please delete the extra tags.'
    ERROR_FILE_HAS_ONE_LOSE_OTHER = 'the file has the $$, but do not has the $$.'
    ERROR_FILE_LOSE_ONE = 'the file missing $$'
    FUNCTION_DECL = 'Function naming should use the big hump naming style or beginning with OH/OS,and using "_" segmentation.'
    STRUCT_DECL = 'Structure type naming should use the big hump naming style.'
    ENUM_DECL = 'Enum type naming should use the big hump naming style.'
    UNION_DECL = 'Consortium type naming should use the big hump naming style.'
    VAR_DECL = 'Variable naming should use the small hump naming style.'
    PARM_DECL = 'Function parameters naming should use the small hump naming style.'
    MACRO_PARAMETERS_TYPE_NAMING_ERROR = 'Macro parameters naming should use the small hump naming style.'
    FIELD_DECL = 'Fields in the structure naming should use the small hump naming style.'
    MEMBERS_OF_THE_CONSORTIUM_TYPE_NAMING_ERROR = 'Members of the consortium naming should use the small hump naming style.'
    MACRO_DEFINITION = 'Macro naming should use all uppercase, separated by underscores naming style.'
    ENUM_CONSTANT_DECL = 'Enum value naming should use all uppercase, separated by underscores naming style.'
    GOTO_LABEL_TYPE_NAMING_ERROR = 'Goto label value naming should use all uppercase, separated by underscores naming style.'
    GLOBAL_VARIABLE_TYPE_NAMING_ERROR = 'Global variable should increase "g_" prefix.'
    TRANSLATION_UNIT = 'File naming should be all lowercase, separated by underscores.'
class OutputTxt:
    """One result record of the doc check, as written to the output report.

    Removed the redundant class-level attribute defaults: __init__ always
    assigns every field, so the class attributes were dead and could leak
    a stale value when read on the class itself.
    """

    def __init__(self, id, level, location, file_path, message):
        self.id = id                # error-type id (see ErrorType values)
        self.level = level          # severity (see ErrorLevel values)
        self.location = location    # position of the finding in the file
        self.filePath = file_path   # path of the checked file
        self.message = message      # human-readable diagnostic text

    def get_id(self):
        return self.id

    def set_id(self, id):
        self.id = id

    def get_level(self):
        return self.level

    def set_level(self, level):
        self.level = level

    def get_location(self):
        return self.location

    def set_location(self, location):
        self.location = location

    def get_file_path(self):
        return self.filePath

    def set_file_path(self, file_path):
        self.filePath = file_path

    def get_message(self):
        return self.message

    def set_message(self, message):
        self.message = message
class ApiResultInfo:
    """Details of one API check failure: category, message and position."""
    # NOTE(review): the defaults below are class attributes; instances get
    # their own copies only when a setter or __init__ assigns them.
    errorType: dict = ErrorType.DEFAULT.value  # one of the ErrorType.*.value dicts
    errorInfo = ''
    level: int = -1  # an ErrorLevel value; -1 means not yet set
    apiName = ''
    apiFullText = ''
    fileName = ''
    location = ''
    locationLine = -1
    locationColumn = -1
    type: str = LogType.DEFAULT.value  # a LogType value ('' / 'Api' / 'JsDoc' / 'File')
    version = -1
    basename = ''
    def __init__(self, error_type=None, error_info='', api_name=''):
        # None (not a mutable default) so each instance gets its own dict.
        if error_type is None:
            error_type = ErrorType.DEFAULT.value
        self.errorType = error_type
        self.errorInfo = error_info
        self.apiName = api_name
    def get_error_type(self):
        return self.errorType
    def set_error_type(self, error_type):
        self.errorType = error_type
    def get_file_name(self):
        return self.fileName
    def set_file_name(self, file_name):
        self.fileName = file_name
    def get_type(self):
        return self.type
    def set_type(self, type):
        self.type = type
    def get_error_info(self):
        return self.errorInfo
    def set_error_info(self, error_info):
        self.errorInfo = error_info
    def get_version(self):
        return self.version
    def set_version(self, version):
        self.version = version
    def get_basename(self):
        return self.basename
    def set_basename(self, basename):
        self.basename = basename
    def get_level(self):
        return self.level
    def set_level(self, level):
        self.level = level
    def get_api_name(self):
        return self.apiName
    def set_api_name(self, api_name):
        self.apiName = api_name
    def get_api_full_text(self):
        return self.apiFullText
    def set_api_full_text(self, api_full_text):
        self.apiFullText = api_full_text
    def get_location_line(self):
        return self.locationLine
    def set_location_line(self, location_line):
        self.locationLine = location_line
    def get_location_column(self):
        return self.locationColumn
    def set_location_column(self, location_column):
        self.locationColumn = location_column
    def get_location(self):
        return self.location
    def set_location(self, location):
        self.location = location
class DocInfo:
    """Values parsed from the JSDoc block currently being processed."""
    group: str = ''        # @addtogroup value
    brief: str = ''        # @brief value
    deprecated: str = ''   # @deprecated value
    file: str = ''         # @file value
    permission: str = ''   # @permission value
    since: str = ''        # @since value
    syscap: str = ''       # @syscap value
    param_index: int = -1  # index of the last @param processed; -1 = none yet
    throw_index: int = -1  # index of the last throw entry processed; -1 = none yet
class FileDocInfo:
    """Aggregated doc-parsing state for one header file.

    All state is now created per instance in __init__; previously
    curr_doc_info was a class-level DocInfo() shared by every
    FileDocInfo object, so doc state leaked between files.
    """

    def __init__(self):
        self.is_in_group_tag = False   # currently inside an @addtogroup doc block
        self.group_name = None         # value of the @addtogroup tag
        self.has_group_start = False   # saw the opening '@{'
        self.has_group_end = False     # saw the closing '@}'
        self.is_in_file_tag = False    # currently inside an @file doc block
        self.file_name = None          # value of the @file tag
        self.file_brief = None         # @brief value of the file block
        self.file_library = None       # @library value of the file block
        self.file_syscap = None        # @syscap value of the file block
        self.curr_doc_info = DocInfo() # fresh per-instance doc state

View File

@ -2,6 +2,8 @@ import enum
class StringConstant(enum.Enum):
    """Tool-wide path and name constants (edit the paths for your machine)."""
    # local libclang shared library used by the header parser
    LIB_CLG_PATH = r'D:\Environment\LLVM\bin\libclang.dll'
    # GN template whose headers the tool collects
    FUNK_NAME = "ohos_ndk_headers"
    # path of the local interface_sdk_c checkout (drive letter stripped)
    REPLACE_WAREHOUSE = '\\interface_sdk_c\\interface_sdk_c'
    # absolute path of the musl third-party headers in the local checkout
    INCLUDE_LIB = r'third_party\musl\ndk_musl_include'

View File

@ -250,5 +250,81 @@
{
"first_introduced": "10",
"name": "OH_AI_TensorSetUserData"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainCfgCreate"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainCfgDestroy"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainCfgGetLossName"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainCfgSetLossName"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainCfgGetOptimizationLevel"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainCfgSetOptimizationLevel"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainModelBuild"
},
{
"first_introduced": "11",
"name": "OH_AI_TrainModelBuildFromFile"
},
{
"first_introduced": "11",
"name": "OH_AI_RunStep"
},
{
"first_introduced": "11",
"name": "OH_AI_ModelSetLearningRate"
},
{
"first_introduced": "11",
"name": "OH_AI_ModelGetLearningRate"
},
{
"first_introduced": "11",
"name": "OH_AI_ModelGetWeights"
},
{
"first_introduced": "11",
"name": "OH_AI_ModelUpdateWeights"
},
{
"first_introduced": "11",
"name": "OH_AI_ModelGetTrainMode"
},
{
"first_introduced": "11",
"name": "OH_AI_ModelSetTrainMode"
},
{
"first_introduced": "11",
"name": "OH_AI_ModelSetupVirtualBatch"
},
{
"first_introduced": "11",
"name": "OH_AI_ExportModel"
},
{
"first_introduced": "11",
"name": "OH_AI_ExportModelBuffer"
},
{
"first_introduced": "11",
"name": "OH_AI_ExportWeightsCollaborateWithMicro"
}
]

View File

@ -18,7 +18,7 @@
* @addtogroup MindSpore
* @{
*
* @brief MindSpore Lite的模型推理相关接口
* @brief provide the model reasoning related interfaces of MindSpore Lite.
*
* @Syscap SystemCapability.Ai.MindSpore
* @since 9
@ -27,7 +27,7 @@
/**
* @file model.h
*
* @brief
* @brief provide model-related interfaces that can be used for model creation, model reasoning, and more.
*
* @library libmindspore_lite_ndk.so
* @since 9
@ -45,6 +45,8 @@ extern "C" {
typedef void *OH_AI_ModelHandle;
typedef void *OH_AI_TrainCfgHandle;
typedef struct OH_AI_TensorHandleArray {
size_t handle_num;
OH_AI_TensorHandle *handle_list;
@ -66,13 +68,15 @@ typedef bool (*OH_AI_KernelCallBack)(const OH_AI_TensorHandleArray inputs, const
/**
* @brief Create a model object.
*
* @return Model object handle.
* @since 9
*/
OH_AI_API OH_AI_ModelHandle OH_AI_ModelCreate();
OH_AI_API OH_AI_ModelHandle OH_AI_ModelCreate(void);
/**
* @brief Destroy the model object.
*
* @param model Model object handle address.
* @since 9
*/
@ -80,6 +84,7 @@ OH_AI_API void OH_AI_ModelDestroy(OH_AI_ModelHandle *model);
/**
* @brief Build the model from model file buffer so that it can run on a device.
*
* @param model Model object handle.
* @param model_data Define the buffer read from a model file.
* @param data_size Define bytes number of model file buffer.
@ -93,6 +98,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *mod
/**
* @brief Load and build the model from model path so that it can run on a device.
*
* @param model Model object handle.
* @param model_path Define the model file path.
* @param model_type Define The type of model file.
@ -105,6 +111,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const c
/**
* @brief Resizes the shapes of inputs.
*
* @param model Model object handle.
* @param inputs The array that includes all input tensor handles.
* @param shape_infos Defines the new shapes of inputs, should be consistent with inputs.
@ -117,6 +124,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_Te
/**
* @brief Inference model.
*
* @param model Model object handle.
* @param inputs The array that includes all input tensor handles.
* @param outputs The array that includes all output tensor handles.
@ -131,6 +139,7 @@ OH_AI_API OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_T
/**
* @brief Obtains all input tensor handles of the model.
*
* @param model Model object handle.
* @return The array that includes all input tensor handles.
* @since 9
@ -139,6 +148,7 @@ OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle m
/**
* @brief Obtains all output tensor handles of the model.
*
* @param model Model object handle.
* @return The array that includes all output tensor handles.
* @since 9
@ -147,6 +157,7 @@ OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle
/**
* @brief Obtains the input tensor handle of the model by name.
*
* @param model Model object handle.
* @param tensor_name The name of tensor.
* @return The input tensor handle with the given name, if the name is not found, an NULL is returned.
@ -156,6 +167,7 @@ OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHa
/**
* @brief Obtains the output tensor handle of the model by name.
*
* @param model Model object handle.
* @param tensor_name The name of tensor.
* @return The output tensor handle with the given name, if the name is not found, an NULL is returned.
@ -163,6 +175,227 @@ OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHa
*/
OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
/**
* @brief Create a TrainCfg object. Only valid for Lite Train.
*
* @return TrainCfg object handle.
* @since 11
*/
OH_AI_API OH_AI_TrainCfgHandle OH_AI_TrainCfgCreate(void);
/**
* @brief Destroy the train_cfg object. Only valid for Lite Train.
*
* @param train_cfg TrainCfg object handle.
* @since 11
*/
OH_AI_API void OH_AI_TrainCfgDestroy(OH_AI_TrainCfgHandle *train_cfg);
/**
* @brief Obtains part of the name that identify a loss kernel. Only valid for Lite Train.
*
* @param train_cfg TrainCfg object handle.
* @param num The num of loss_name.
* @return loss_name.
* @since 11
*/
OH_AI_API char **OH_AI_TrainCfgGetLossName(OH_AI_TrainCfgHandle train_cfg, size_t *num);
/**
* @brief Set part of the name that identify a loss kernel. Only valid for Lite Train.
*
* @param train_cfg TrainCfg object handle.
* @param loss_name Define part of the name that identify a loss kernel.
* @param num The num of loss_name.
* @since 11
*/
OH_AI_API void OH_AI_TrainCfgSetLossName(OH_AI_TrainCfgHandle train_cfg, const char **loss_name, size_t num);
/**
* @brief Obtains optimization level of the train_cfg. Only valid for Lite Train.
*
* @param train_cfg TrainCfg object handle.
* @return OH_AI_OptimizationLevel.
* @since 11
*/
OH_AI_API OH_AI_OptimizationLevel OH_AI_TrainCfgGetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg);
/**
* @brief Set optimization level of the train_cfg. Only valid for Lite Train.
*
* @param train_cfg TrainCfg object handle.
* @param level The optimization level of train_cfg.
* @since 11
*/
OH_AI_API void OH_AI_TrainCfgSetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg, OH_AI_OptimizationLevel level);
/**
* @brief Build the train model from model buffer so that it can run on a device. Only valid for Lite Train.
*
* @param model Model object handle.
* @param model_data Define the buffer read from a model file.
* @param data_size Define bytes number of model file buffer.
* @param model_type Define The type of model file.
* @param model_context Define the context used to store options during execution.
* @param train_cfg Define the config used by training.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_TrainModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size,
OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context,
const OH_AI_TrainCfgHandle train_cfg);
/**
* @brief Build the train model from model file buffer so that it can run on a device. Only valid for Lite Train.
*
* @param model Model object handle.
* @param model_path Define the model path.
* @param model_type Define The type of model file.
* @param model_context Define the context used to store options during execution.
* @param train_cfg Define the config used by training.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_TrainModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path,
OH_AI_ModelType model_type,
const OH_AI_ContextHandle model_context,
const OH_AI_TrainCfgHandle train_cfg);
/**
* @brief Train model by step. Only valid for Lite Train.
*
* @param model Model object handle.
* @param before CallBack before predict.
* @param after CallBack after predict.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_RunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack before,
const OH_AI_KernelCallBack after);
/**
 * @brief Sets the Learning Rate of the training. Only valid for Lite Train.
 *
 * @param model Model object handle.
 * @param learning_rate Learning rate to set.
 * @return OH_AI_Status of operation.
 * @since 11
 */
OH_AI_API OH_AI_Status OH_AI_ModelSetLearningRate(OH_AI_ModelHandle model, float learning_rate);
/**
* @brief Obtains the Learning Rate of the optimizer. Only valid for Lite Train.
*
* @param model Model object handle.
* @return Learning rate. 0.0 if no optimizer was found.
* @since 11
*/
OH_AI_API float OH_AI_ModelGetLearningRate(OH_AI_ModelHandle model);
/**
* @brief Obtains all weights tensors of the model. Only valid for Lite Train.
*
* @param model Model object handle.
* @return The vector that includes all gradient tensors.
* @since 11
*/
OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetWeights(OH_AI_ModelHandle model);
/**
 * @brief update weights tensors of the model. Only valid for Lite Train.
 *
 * @param model Model object handle.
 * @param new_weights A vector new weights.
 * @return OH_AI_Status
 * @since 11
 */
OH_AI_API OH_AI_Status OH_AI_ModelUpdateWeights(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray new_weights);
/**
* @brief Get the model running mode.
*
* @param model Model object handle.
* @return Is Train Mode or not.
* @since 11
*/
OH_AI_API bool OH_AI_ModelGetTrainMode(OH_AI_ModelHandle model);
/**
* @brief Set the model running mode. Only valid for Lite Train.
*
* @param model Model object handle.
* @param train True means model runs in Train Mode, otherwise Eval Mode.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_ModelSetTrainMode(OH_AI_ModelHandle model, bool train);
/**
* @brief Setup training with virtual batches. Only valid for Lite Train.
*
* @param model Model object handle.
* @param virtual_batch_multiplier Virtual batch multiplier, use any number < 1 to disable.
* @param lr Learning rate to use for virtual batch, -1 for internal configuration.
* @param momentum Batch norm momentum to use for virtual batch, -1 for internal configuration.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_ModelSetupVirtualBatch(OH_AI_ModelHandle model, int virtual_batch_multiplier, float lr,
float momentum);
/**
* @brief Export training model from file. Only valid for Lite Train.
*
* @param model The model data.
* @param model_type The model file type.
* @param model_file The exported model file.
* @param quantization_type The quantification type.
* @param export_inference_only Whether to export a reasoning only model.
* @param output_tensor_name The set the name of the output tensor of the exported reasoning model, default as
* empty, and export the complete reasoning model.
* @param num The number of output_tensor_name.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_ExportModel(OH_AI_ModelHandle model, OH_AI_ModelType model_type, const char *model_file,
OH_AI_QuantizationType quantization_type, bool export_inference_only,
char **output_tensor_name, size_t num);
/**
* @brief Export training model from buffer. Only valid for Lite Train.
*
* @param model The model data.
* @param model_type The model file type.
* @param model_data The exported model buffer.
* @param data_size The exported model buffer size.
* @param quantization_type The quantification type.
* @param export_inference_only Whether to export a reasoning only model.
* @param output_tensor_name The set the name of the output tensor of the exported reasoning model, default as
* empty, and export the complete reasoning model.
* @param num The number of output_tensor_name.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_ExportModelBuffer(OH_AI_ModelHandle model, OH_AI_ModelType model_type, void *model_data,
size_t *data_size, OH_AI_QuantizationType quantization_type,
bool export_inference_only, char **output_tensor_name, size_t num);
/**
* @brief Export model's weights, which can be used in micro only. Only valid for Lite Train.
*
* @param model The model data.
* @param model_type The model file type.
* @param weight_file The path of exported weight file.
* @param is_inference Whether to export weights from a reasoning model. Currently, only support this is `true`.
* @param enable_fp16 Float-weight is whether to be saved in float16 format.
* @param changeable_weights_name The set the name of these weight tensors, whose shape is changeable.
* @param num The number of changeable_weights_name.
* @return OH_AI_Status.
* @since 11
*/
OH_AI_API OH_AI_Status OH_AI_ExportWeightsCollaborateWithMicro(OH_AI_ModelHandle model, OH_AI_ModelType model_type,
const char *weight_file, bool is_inference,
bool enable_fp16, char **changeable_weights_name,
size_t num);
#ifdef __cplusplus
}
#endif

View File

@ -18,7 +18,7 @@
* @addtogroup MindSpore
* @{
*
* @brief MindSpore Lite的模型推理相关接口
* @brief provide the model reasoning related interfaces of MindSpore Lite.
*
* @Syscap SystemCapability.Ai.MindSpore
* @since 9
@ -27,7 +27,7 @@
/**
* @file types.h
*
* @brief MindSpore Lite支持的模型文件类型和设备类型
* @brief provides the model file types and device types supported by MindSpore Lite.
*
* @library libmindspore_lite_ndk.so
* @since 9
@ -47,57 +47,120 @@ extern "C" {
#endif
#endif
/**
 * @brief model file type.
 *
 * @since 9
 */
typedef enum OH_AI_ModelType {
    /** the model type is MindIR, and the corresponding model file extension is .ms. */
    OH_AI_MODELTYPE_MINDIR = 0,
    /** invalid model type */
    OH_AI_MODELTYPE_INVALID = 0xFFFFFFFF
} OH_AI_ModelType;
/**
 * @brief device type information.
 *
 * @since 9
 */
typedef enum OH_AI_DeviceType {
    /** cpu */
    OH_AI_DEVICETYPE_CPU = 0,
    /** gpu */
    OH_AI_DEVICETYPE_GPU,
    /** kirin npu */
    OH_AI_DEVICETYPE_KIRIN_NPU,
    /** nnrt device, ohos-only device range: [60, 80) */
    OH_AI_DEVICETYPE_NNRT = 60,
    /** invalid device type */
    OH_AI_DEVICETYPE_INVALID = 100,
} OH_AI_DeviceType;
/**
 * @brief The hardware device type managed by NNRT.
 *
 * @since 10
 */
typedef enum OH_AI_NNRTDeviceType {
    /** Devices that are not CPU, GPU, or dedicated accelerator */
    OH_AI_NNRTDEVICE_OTHERS = 0,
    /** CPU device */
    OH_AI_NNRTDEVICE_CPU = 1,
    /** GPU device */
    OH_AI_NNRTDEVICE_GPU = 2,
    /** Dedicated hardware accelerator */
    OH_AI_NNRTDEVICE_ACCELERATOR = 3,
} OH_AI_NNRTDeviceType;
/**
 * @brief Performance mode of the NNRT hardware device.
 *
 * @since 10
 */
typedef enum OH_AI_PerformanceMode {
    /** No performance mode preference */
    OH_AI_PERFORMANCE_NONE = 0,
    /** Low power consumption mode */
    OH_AI_PERFORMANCE_LOW = 1,
    /** Medium performance mode */
    OH_AI_PERFORMANCE_MEDIUM = 2,
    /** High performance mode */
    OH_AI_PERFORMANCE_HIGH = 3,
    /** Ultimate performance mode */
    OH_AI_PERFORMANCE_EXTREME = 4
} OH_AI_PerformanceMode;
/**
 * @brief NNRT reasoning task priority.
 *
 * @since 10
 */
typedef enum OH_AI_Priority {
    /** No priority preference */
    OH_AI_PRIORITY_NONE = 0,
    /** Low priority */
    OH_AI_PRIORITY_LOW = 1,
    /** Medium priority */
    OH_AI_PRIORITY_MEDIUM = 2,
    /** High priority */
    OH_AI_PRIORITY_HIGH = 3
} OH_AI_Priority;
/**
 * @brief Optimization level for a train model.
 *
 * @since 11
 */
typedef enum OH_AI_OptimizationLevel {
/** Do not change */
OH_AI_KO0 = 0,
/** Cast network to float16, keep batchnorm and loss in float32 */
OH_AI_KO2 = 2,
/** Cast network to float16, including batchnorm */
OH_AI_KO3 = 3,
/** Choose optimization based on device */
OH_AI_KAUTO = 4,
/** Invalid optimization level */
OH_AI_KOPTIMIZATIONTYPE = 0xFFFFFFFF
} OH_AI_OptimizationLevel;
/**
 * @brief Quantization type.
 *
 * @since 11
 */
typedef enum OH_AI_QuantizationType {
/** No quantization */
OH_AI_NO_QUANT = 0,
/** Weight quantization */
OH_AI_WEIGHT_QUANT = 1,
/** Full quantization */
OH_AI_FULL_QUANT = 2,
/** Invalid quantization type */
OH_AI_UNKNOWN_QUANT_TYPE = 0xFFFFFFFF
} OH_AI_QuantizationType;
/** Opaque descriptor of an NNRT device; presumably obtained/queried via the OH_AI_*NNRTDeviceDesc APIs — confirm against context.h. */
typedef struct NNRTDeviceDesc NNRTDeviceDesc;
#ifdef __cplusplus
}

View File

@ -1,254 +0,0 @@
[
{
"first_introduced": "9",
"name": "OH_AI_ContextCreate"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextDestroy"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextSetThreadNum"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextGetThreadNum"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextSetThreadAffinityMode"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextGetThreadAffinityMode"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextSetThreadAffinityCoreList"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextGetThreadAffinityCoreList"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextSetEnableParallel"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextGetEnableParallel"
},
{
"first_introduced": "9",
"name": "OH_AI_ContextAddDeviceInfo"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoCreate"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoDestroy"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoSetProvider"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoGetProvider"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoSetProviderDevice"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoGetProviderDevice"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoGetDeviceType"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoSetEnableFP16"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoGetEnableFP16"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoSetFrequency"
},
{
"first_introduced": "9",
"name": "OH_AI_DeviceInfoGetFrequency"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelCreate"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelDestroy"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelBuild"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelBuildFromFile"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelResize"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelPredict"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelGetInputs"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelGetOutputs"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelGetInputByTensorName"
},
{
"first_introduced": "9",
"name": "OH_AI_ModelGetOutputByTensorName"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorCreate"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorDestroy"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorClone"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorSetName"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetName"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorSetDataType"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetDataType"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorSetShape"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetShape"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorSetFormat"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetFormat"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorSetData"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetData"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetMutableData"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetElementNum"
},
{
"first_introduced": "9",
"name": "OH_AI_TensorGetDataSize"
},
{
"first_introduced": "10",
"name": "OH_AI_GetAllNNRTDeviceDescs"
},
{
"first_introduced": "10",
"name": "OH_AI_DestroyAllNNRTDeviceDescs"
},
{
"first_introduced": "10",
"name": "OH_AI_GetDeviceIdFromNNRTDeviceDesc"
},
{
"first_introduced": "10",
"name": "OH_AI_GetNameFromNNRTDeviceDesc"
},
{
"first_introduced": "10",
"name": "OH_AI_GetTypeFromNNRTDeviceDesc"
},
{
"first_introduced": "10",
"name": "OH_AI_CreateNNRTDeviceInfoByName"
},
{
"first_introduced": "10",
"name": "OH_AI_CreateNNRTDeviceInfoByType"
},
{
"first_introduced": "10",
"name": "OH_AI_DeviceInfoSetDeviceId"
},
{
"first_introduced": "10",
"name": "OH_AI_DeviceInfoGetDeviceId"
},
{
"first_introduced": "10",
"name": "OH_AI_DeviceInfoSetPerformanceMode"
},
{
"first_introduced": "10",
"name": "OH_AI_DeviceInfoGetPerformanceMode"
},
{
"first_introduced": "10",
"name": "OH_AI_DeviceInfoSetPriority"
},
{
"first_introduced": "10",
"name": "OH_AI_GetElementOfNNRTDeviceDescs"
},
{
"first_introduced": "10",
"name": "OH_AI_DeviceInfoAddExtension"
},
{
"first_introduced": "10",
"name": "OH_AI_TensorSetUserData"
}
]