Mirror of https://gitee.com/openharmony/interface_sdk_c
Synced 2024-11-23 06:39:54 +00:00

capi tool: adjust the directory layout and comments

Signed-off-by: zhangwu <zhangwu47@huawei.com>

parent 0a18b89246
commit 0abf2b7046
@@ -68,16 +68,16 @@ def write_in_txt(check_result, output_path):
 
 
 def result_to_json(check_result):
-    txtResul = []
+    txt_resul = []
     if len(check_result) == 0:
-        txtResul.append('api_check: false')
+        txt_resul.append('api_check: false')
     else:
         for result in check_result:
             location = f'{result.location}(line:{result.locationLine}, col:{result.locationColumn})'
             message = 'API check error of [{}]:{}'.format(result.errorType['description'], result.errorInfo)
-            txtResul.append(OutputTxt(result.errorType['id'], result.level, location, result.fileName, message))
-        txtResul.append('api_check: false')
-    return json.dumps(txtResul, default=lambda obj: obj.__dict__, indent=4)
+            txt_resul.append(OutputTxt(result.errorType['id'], result.level, location, result.fileName, message))
+        txt_resul.append('api_check: false')
+    return json.dumps(txt_resul, default=lambda obj: obj.__dict__, indent=4)
 
 
 def curr_entry(pr_id):
@@ -90,7 +90,8 @@ def curr_entry(pr_id):
 
 def get_check_result_list(file_list):
     check_result_list = []
     for file in file_list:
-        root_path = file.split('sdk_c')[0] + 'sdk_c'
+        root_start = file.split('sdk_c')[0]
+        root_path = f'{root_start}sdk_c'
         python_obj = parser_include_ast(root_path, [file])
         check_result_list.extend(process_all_json(python_obj))
         check_result_list.extend(check_syntax(file))
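
Note: result_to_json leans on the default= hook of json.dumps, which is what lets a list mixing plain strings and OutputTxt objects serialize in one call. A minimal sketch of that trick follows; the OutputTxt stand-in here is simplified, not the real class from the typedef module.

# Minimal sketch of the default=lambda obj: obj.__dict__ trick used by
# result_to_json; the OutputTxt stand-in is simplified for illustration.
import json


class OutputTxt:
    def __init__(self, id_param, level_param, location_param, file_path_param, message_param):
        self.id = id_param
        self.level = level_param
        self.location = location_param
        self.file_path = file_path_param
        self.message = message_param


results = [OutputTxt(1, 2, 'a.h(line:3, col:5)', 'a.h', 'API check error of [x]:y')]
# Objects json cannot serialize natively are routed through default=, which
# returns their attribute dict; plain strings pass through untouched.
print(json.dumps(results + ['api_check: false'],
                 default=lambda obj: obj.__dict__, indent=4))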
@@ -56,7 +56,7 @@ def get_line_and_column(location):
     return ['', '']
 
 
-def get_original(result_child):
+def get_original(result_child: str):
     if len(result_child) == 0:
         return result_child
     original = result_child.lstrip().split("\r\n")
@@ -66,13 +66,13 @@ def get_original(result_child):
         return original[1]
     if len(original) == 4:
         return original[2]
+    return ''
 
 
 def get_specified_string(target_string):
     message_type = 'error'
     function_result = []
     pattern = r'error: (.*?)\r\n'
-    global matches
     matches = re.findall(pattern, target_string, re.DOTALL)
     if len(matches) == 0:
         pattern = r'warning: (.*?)\r\n'
@@ -90,7 +90,7 @@ def get_specified_string(target_string):
     return function_result
 
 
-def get_file_path(file_path):
+def get_file_path(file_path: str):
     if len(file_path) == 0:
         return file_path
     path_split_len = len(file_path.split('\r\n'))
@@ -101,3 +101,4 @@ def get_file_path(file_path):
         return path_list[1]
     if path_split_len == 3:
         return path_list[2]
+    return ''
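
Note: dropping `global matches` is safe here because `matches` is only used locally. The error-then-warning fallback that get_specified_string implements works like this minimal sketch; the compiler output below is synthetic, invented for illustration.

# Sketch of the error-then-warning fallback in get_specified_string,
# run against made-up compiler output.
import re

target_string = 'a.h:3:5: warning: unused variable\r\nmore text\r\n'

pattern = r'error: (.*?)\r\n'
matches = re.findall(pattern, target_string, re.DOTALL)
if len(matches) == 0:
    # no errors found: retry with the warning pattern
    pattern = r'warning: (.*?)\r\n'
    matches = re.findall(pattern, target_string, re.DOTALL)
print(matches)  # ['unused variable']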
@@ -1,5 +1,23 @@
+#!/usr/bin/env python
+# coding=utf-8
+##############################################
+# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################
+
 import json
 import os
+import openpyxl
 import pandas as pd  # used to generate the tables
 
 
@@ -22,7 +40,8 @@ def compare_json_file(js_file1, js_file2):  # get the comparison result
         if key == 0:
             only_file1.append(it)
     only_file2 = get_difference_data(compare_result, data2)  # entries unique to file2
+    js1.close()
+    js2.close()
     return compare_result, only_file1, only_file2
 
 
@@ -65,6 +84,11 @@ def get_parm(item, parm):
 def filter_func(item):
     del item["is_extern"]  # drop the is_extern key; after filtering everything is extern
     del item["comment"]
+    if "type_ref" in list(item.keys()):
+        del item["type_ref"]
+    if "children" in list(item.keys()):
+        del item["children"]
+
     item["location_path"] = item["location"]["location_path"]
     item["location"] = item["location"]["location_line"]
     if item["kind"] == 'FUNCTION_DECL':
@@ -74,38 +98,67 @@ def filter_func(item):
         get_parm(item, parm)
     else:
         item["kind"] = '变量类型'
+        del item["is_const"]
     return item
 
 
 def generate_excel(array, name, only_file1, only_file2):
-    # convert the list to a DataFrame, reading the data column-wise (orient='columns')
-    pf = pd.DataFrame.from_dict(array, orient='columns')
-    pf1 = pd.DataFrame(only_file1)
-    pf2 = pd.DataFrame(only_file2)
-
     # map the column names to Chinese headers
     columns_map = {
         'name': '名称',
         'kind': '节点类型',
         'type': '类型',
         'gn_path': 'gn文件路径',
-        'location_path': '文件相对路径',
+        "node_content": '节点内容',
         'location': '位置行',
         'return_type': '返回类型',
-        'parm': '参数'
+        'parm': '参数',
+        'location_path': '文件相对路径',
     }
 
-    pf.rename(columns=columns_map, inplace=True)
-    with pd.ExcelWriter(name) as writer:  # generate the workbook
-        pf.to_excel(writer, sheet_name='对比结果', index=False)
-        pf1.to_excel(writer, sheet_name='生成json独有', index=False)
-        pf2.to_excel(writer, sheet_name='原json独有', index=False)
+    workbook = openpyxl.Workbook()
+    work_sheet1 = workbook.active
+    work_sheet1.title = '对比结果'
+    write_dict_to_worksheet(work_sheet1, array, header=columns_map)
+
+    work_sheet2 = workbook.create_sheet('生成json独有')
+    write_dict_to_worksheet(work_sheet2, only_file1, header=columns_map)
+
+    work_sheet3 = workbook.create_sheet('原有json独有')
+    write_dict_to_worksheet(work_sheet3, only_file2, header=columns_map)
+    workbook.save(name)
 
 
-def increase_sheet(array, name, sheet):
-    pf = pd.DataFrame(array)
-    writer = pd.ExcelWriter(name, mode='a', engine='openpyxl', if_sheet_exists='new')
-    pf.to_excel(writer, sheet_name=sheet, index=False)
+def write_dict_to_worksheet(work_sheet, result_data, header=None):
+    if header is None:
+        header = {}
+    row_num = 1
+    for col_num, col_value in enumerate(header.values()):
+        work_sheet.cell(row_num, col_num + 1, col_value)
+
+    row_num = 2
+    for data in result_data:
+        for col_num, col_value in enumerate(data.values()):
+            if isinstance(col_value, dict):
+                param_data = []
+                for dict_value in col_value.values():
+                    if isinstance(dict_value, int):
+                        dict_value = str(dict_value)
+                    param_data.append(dict_value)
+                param_str = ','.join(param_data)
+                work_sheet.cell(row_num, col_num + 1, param_str)
+            elif isinstance(col_value, list):
+                list_data = ','.join(col_value)
+                work_sheet.cell(row_num, col_num + 1, list_data)
+            else:
+                work_sheet.cell(row_num, col_num + 1, col_value)
+        row_num += 1
+
+
+def del_repetition_value(data, result_list, compare_list):
+    for item in data:
+        if item not in result_list and item not in compare_list:
+            result_list.append(item)
 
 
 def get_json_file(json_file_new, json_file):  # get the generated json file
@@ -118,6 +171,9 @@ def get_json_file(json_file_new, json_file):  # get the generated json file
     only_file2 = []
     for item in json_file2:  # compare each json under the directory
         # compare the two json files
-        result_list, only_file1, only_file2 = compare_json_file(json_file1, item)
+        result_list_part, only_file1_part, only_file2_part = compare_json_file(json_file1, item)
+        result_list.extend(result_list_part)
+        del_repetition_value(only_file1_part, only_file1, result_list)
+        only_file2.extend(only_file2_part)
+
     return result_list, head_name, only_file1, only_file2  # return the comparison data and the table name
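
Note: the switch from pd.ExcelWriter to openpyxl is what makes write_dict_to_worksheet possible: dict- and list-valued cells are flattened by hand instead of tripping up pandas. A minimal sketch of the same pattern, assuming openpyxl is installed; the sample rows are invented.

# Sketch of the openpyxl pattern that replaces pd.ExcelWriter here: one
# header row from a column map, then one row per result dict, with list
# values flattened to comma-joined strings like the real helper does.
import openpyxl

columns_map = {'name': '名称', 'kind': '节点类型'}
rows = [{'name': 'OH_Foo', 'kind': 'FUNCTION_DECL'},
        {'name': 'bar', 'kind': ['VAR_DECL', 'extern']}]

workbook = openpyxl.Workbook()
sheet = workbook.active
sheet.title = '对比结果'
for col_num, col_value in enumerate(columns_map.values()):
    sheet.cell(1, col_num + 1, col_value)          # header row
for row_num, data in enumerate(rows, start=2):
    for col_num, col_value in enumerate(data.values()):
        if isinstance(col_value, list):
            col_value = ','.join(col_value)        # flatten lists to one cell
        sheet.cell(row_num, col_num + 1, col_value)
workbook.save('demo.xlsx')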
@@ -1,3 +1,20 @@
+#!/usr/bin/env python
+# coding=utf-8
+##############################################
+# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################
+
 import re
 import os
 import clang.cindex
@@ -6,6 +23,7 @@ from clang.cindex import Index
 from clang.cindex import CursorKind
 from clang.cindex import TypeKind
 from utils.constants import StringConstant
+from utils.constants import RegularExpressions
 
 
 def find_parent(cursor):  # get the parent node
@@ -18,27 +36,39 @@ def find_parent(cursor):  # get the parent node
     elif cursor_parent.kind == CursorKind.STRUCT_DECL or cursor_parent.kind == CursorKind.UNION_DECL:
         return cursor_parent.kind
     else:
-        parent = cursor_parent.semantic_parent
-        if parent is not None:
-            return parent.kind
+        parent_kind = processing_root_parent(cursor_parent)
+        return parent_kind
+    return None
+
+
+def processing_root_parent(cursor_parent):
+    parent = cursor_parent.semantic_parent
+    if parent is not None:
+        if parent.type.kind == TypeKind.INVALID:
+            parent_kind = CursorKind.TRANSLATION_UNIT
+            return parent_kind
+        else:
+            return parent.kind
+    return None
 
 
 def processing_no_child(cursor, data):  # handle nodes that have no children
     if cursor.kind == CursorKind.INTEGER_LITERAL:  # integer literal nodes have no children
         parent_kind = find_parent(cursor)  # determine which kind this belongs to
-        if parent_kind == CursorKind.STRUCT_DECL:
-            data["name"] = 'struct_int_no_spelling'
-        elif parent_kind == CursorKind.UNION_DECL:
-            data["name"] = 'union_int_no_spelling'
-        elif parent_kind == CursorKind.ENUM_DECL:
-            data["name"] = 'enum_int_no_spelling'
-        elif parent_kind == CursorKind.VAR_DECL:
-            data["name"] = 'var_int_no_spelling'
-        else:
-            data["name"] = "integer_no_spelling"
-        tokens = cursor.get_tokens()
-        for token in tokens:
-            data["integer_value"] = token.spelling  # get the integer value
+        if parent_kind:
+            if parent_kind == CursorKind.STRUCT_DECL:
+                data["name"] = 'struct_int_no_spelling'
+            elif parent_kind == CursorKind.UNION_DECL:
+                data["name"] = 'union_int_no_spelling'
+            elif parent_kind == CursorKind.ENUM_DECL:
+                data["name"] = 'enum_int_no_spelling'
+            elif parent_kind == CursorKind.VAR_DECL:
+                data["name"] = 'var_int_no_spelling'
+            else:
+                data["name"] = "integer_no_spelling"
+            tokens = cursor.get_tokens()
+            for token in tokens:
+                data["integer_value"] = token.spelling  # get the integer value
 
 
 def get_complex_def(tokens_new, count_token, tokens, data):
@@ -101,9 +131,7 @@ def get_token(cursor):
 
 def judgment_extern(cursor, data):  # check whether extern is present
     tokens = get_token(cursor)
     if cursor.kind == CursorKind.FUNCTION_DECL:
-        if 'static' in tokens:
-            is_extern = False
-        elif 'deprecated' in tokens:
+        if 'static' in tokens or 'deprecated' in tokens:
             is_extern = False
         else:
             is_extern = True
@@ -112,6 +140,10 @@ def judgment_extern(cursor, data):  # check whether extern is present
             is_extern = True
         else:
             is_extern = False
+        if 'const' in tokens:
+            data["is_const"] = True
+        else:
+            data["is_const"] = False
     else:
         is_extern = True
 
@@ -129,10 +161,11 @@ def binary_operator(cursor, data):  # binary operator handling
 
 def distinction_member(cursor, data):  # distinguish struct members from union members
     parent_kind = find_parent(cursor)  # look up the parent node kind
-    if parent_kind == CursorKind.UNION_DECL:
-        data["member"] = "union_member"
-    elif parent_kind == CursorKind.STRUCT_DECL:
-        data["member"] = "struct_member"
+    if parent_kind:
+        if parent_kind == CursorKind.UNION_DECL:
+            data["member"] = "union_member"
+        elif parent_kind == CursorKind.STRUCT_DECL:
+            data["member"] = "struct_member"
 
 
 def processing_parm(cursor, data):  # function parameter node handling
@@ -201,68 +234,103 @@ special_node_process = {
 }
 
 
-def processing_special_node(cursor, data, gn_path=None):  # handle nodes that need special treatment
+def processing_special_node(cursor, data, key, gn_path=None):  # handle nodes that need special treatment
+    if key == 0:
+        location_path = cursor.spelling
+        kind_name = CursorKind.TRANSLATION_UNIT.name
+    else:
+        location_path = cursor.location.file.name
+        kind_name = cursor.kind.name
+
     loc = {
-        "location_path": '{}'.format(cursor.location.file.name),
+        "location_path": '{}'.format(location_path),
         "location_line": cursor.location.line,
         "location_column": cursor.location.column
     }
     if gn_path:
-        relative_path = os.path.relpath(cursor.location.file.name, gn_path)  # get the header's relative path
+        relative_path = os.path.relpath(location_path, gn_path)  # get the header's relative path
         loc["location_path"] = relative_path
     data["location"] = loc
-    if cursor.kind.name in special_node_process.keys():
-        node_process = special_node_process[cursor.kind.name]
+    if kind_name in special_node_process.keys():
+        node_process = special_node_process[kind_name]
         node_process(cursor, data)  # call the matching node handler
 
 
-def ast_to_dict(cursor, current_file, gn_path=None, comment=None):  # organize the parsed data
+def node_extent(cursor, current_file):
+    start_offset = cursor.extent.start.offset
+    end_offset = cursor.extent.end.offset
+    with open(current_file, 'r', encoding='utf=8') as f:
+        f.seek(start_offset)
+        content = f.read(end_offset - start_offset)
+
+    extent = {
+        "start_offset": start_offset,
+        "end_offset": end_offset,
+        "content": content
+    }
+    f.close()
+    return extent
+
+
+def ast_to_dict(cursor, current_file, gn_path=None, comment=None, key=0):  # organize the parsed data
     # common fields
     data = {
         "name": cursor.spelling,
-        "kind": cursor.kind.name,
+        "kind": '',
         "type": cursor.type.spelling,
-        "gn_path": gn_path
+        "gn_path": gn_path,
+        "node_content": {},
+        "comment": ''
     }
+
     if cursor.raw_comment:  # take the comment information if present, otherwise pass
         data["comment"] = cursor.raw_comment
     else:
         data["comment"] = 'none_comment'
-    if cursor.kind == CursorKind.TRANSLATION_UNIT:  # attach the leading comment to the root node, if any
+
+    if key == 0:
+        data["kind"] = CursorKind.TRANSLATION_UNIT.name
         if comment:
-            data["comment"] = comment[0]
+            data["comment"] = comment
+        if gn_path:
+            relative_path = os.path.relpath(cursor.spelling, gn_path)
+            data["name"] = relative_path
     else:
-        processing_special_node(cursor, data, gn_path)  # node handling
+        content = node_extent(cursor, current_file)
+        data["node_content"] = content
+        data["kind"] = cursor.kind.name
+
+    processing_special_node(cursor, data, key, gn_path)  # node handling
     children = list(cursor.get_children())  # if there are children append them; otherwise handle case by case
     if len(children) > 0:
-        if cursor.kind == CursorKind.FUNCTION_DECL:  # function parameters
-            name = "parm"
-        elif (cursor.kind == CursorKind.ENUM_DECL
-              or cursor.kind == CursorKind.STRUCT_DECL
-              or cursor.kind == CursorKind.UNION_DECL):
-            name = "members"
+        if key != 0:
+            if cursor.kind == CursorKind.FUNCTION_DECL:  # function parameters
+                name = "parm"
+            elif (cursor.kind == CursorKind.ENUM_DECL
+                  or cursor.kind == CursorKind.STRUCT_DECL
+                  or cursor.kind == CursorKind.UNION_DECL):
+                name = "members"
+            else:
+                name = "children"
         else:
             name = "children"
         data[name] = []
-        processing_ast_node(children, current_file, data, name, gn_path)
+        for child in children:
+            # drop redundant macro definitions and skip UNEXPOSED_ATTR nodes
+            if child.location.file is not None and child.kind != CursorKind.UNEXPOSED_ATTR \
+                    and child.location.file.name == current_file:
+                processing_ast_node(child, current_file, data, name, gn_path)
     else:
         processing_no_child(cursor, data)  # handle nodes without children
     return data
 
 
-def processing_ast_node(children, current_file, data, name, gn_path):
-    for child in children:
-        # drop redundant macro definitions and skip UNEXPOSED_ATTR nodes
-        if child.location.file is not None and child.kind != CursorKind.UNEXPOSED_ATTR:
-            if child.location.file.name == current_file:
-                child_data = ast_to_dict(child, current_file, gn_path)
-                data[name].append(child_data)
-            else:
-                pass
+def processing_ast_node(child, current_file, data, name, gn_path):
+    child_data = ast_to_dict(child, current_file, gn_path, key=1)
+    if child.kind == CursorKind.TYPE_REF:
+        data["type_ref"] = child_data
+    else:
+        data[name].append(child_data)
 
 
 def preorder_travers_ast(cursor, total, comment, current_file, gn_path=None):  # get attributes
@@ -271,16 +339,41 @@ def preorder_travers_ast(cursor, total, comment, current_file, gn_path=None):
 
 
 def get_start_comments(include_path):  # get the leading comment of each header file
+    file_comment = []
+    content = open_file(include_path)
+    if content:
+        pattern = RegularExpressions.START_COMMENT.value
+        matches = re.findall(pattern, content, re.DOTALL | re.MULTILINE)
+        file_comment.extend(matches)
+
     with open(include_path, 'r', encoding='utf-8') as f:
         f.seek(0)
         content = f.read()
-        pattern = r'/\*[^/]*\*/\s*/\*[^/]*\*/\s*(?=#ifndef)'
-        matches = re.findall(pattern, content, re.DOTALL | re.MULTILINE)
-        if matches is None:
-            pattern = r'/\*[^/]*\*/\s*(?=#ifndef)'
-            matches = re.findall(pattern, content, re.DOTALL | re.MULTILINE)
-
-    return matches
+        pattern_high = RegularExpressions.END_COMMENT.value
+        matches_high = re.findall(pattern_high, content, re.DOTALL | re.MULTILINE)
+        if matches_high:
+            file_comment.extend(matches_high)
+        f.close()
+    return file_comment
+
+
+def open_file(include_path):
+    with open(include_path, 'r', encoding='utf-8') as f:
+        content = ''
+        loge = 0
+        for line in f:
+            if line.startswith('#ifdef __cplusplus'):
+                loge = 1
+                break
+            else:
+                inside_ifdef = True
+
+            if inside_ifdef:
+                content += line
+        if loge == 0:
+            content = ''
+        f.close()
+    return content
 
 
 def api_entrance(share_lib, include_path, gn_path=None, link_path=None):  # statistics entry point
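
Note: node_extent recovers a node's source text by slicing the header file between the cursor's start and end offsets. The same seek/read slice is shown below without libclang; the offsets are hand-picked here, whereas in the real code cursor.extent supplies them, and io.StringIO stands in for the opened header file.

# Sketch of the seek/read slice behind node_extent, minus libclang.
import io

source = 'int OH_Demo(void);\nint other(void);\n'
start_offset, end_offset = 0, 18  # span of the first declaration

f = io.StringIO(source)           # stands in for open(current_file, 'r')
f.seek(start_offset)
content = f.read(end_offset - start_offset)

extent = {"start_offset": start_offset, "end_offset": end_offset, "content": content}
print(extent["content"])  # int OH_Demo(void);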
@@ -1,9 +1,27 @@
+#!/usr/bin/env python
+# coding=utf-8
+##############################################
+# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################
+
 import json
 import os
 import glob
 import re
 import shutil
-from utils.constants import StringConstant
+from utils.constants import StringConstant, RegularExpressions
+from typedef.parser.parser import ParserGetResultTable
 from coreImpl.parser import parse_include, generating_tables  # import the parsing module and the result-table module
 
 
@@ -21,12 +39,12 @@ def find_h_file(matches, f, sources):
         # match sources = \[[^\]]*\] (bracketed content, one or more non-']' characters); \s* matches zero or more whitespace characters
         f.seek(mat.span()[0])
         content = f.read()
-        pattern = r'sources\s*=\s*\[[^\]]*\]'
+        pattern = RegularExpressions.SOURCES.value
         sources_match = re.search(pattern, content)
         if sources_match:
            sources_value = sources_match.group(0)  # the full matched string
            sources_value = re.sub(r'\s', '', sources_value)  # strip whitespace (newlines) and spaces from the source string
-            pattern = r'"([^"]+h)"'  # match the quoted content to find the corresponding .h
+            pattern = RegularExpressions.INCLUDE_H.value  # match the quoted content to find the corresponding .h
            source = re.findall(pattern, sources_value)
            sources.extend(source)
 
@@ -34,13 +52,13 @@ def find_h_file(matches, f, sources):
 def find_function_file(file, function_name):  # search the GN file for the function and, if found, get the matching sources value
     with open(file, 'r') as f:
         content = f.read()  # read the file content
-        pattern = r'\b' + re.escape(function_name) + r'\b'  # '\b' ensures an exact match of the function name
+        pattern = ''.join([r'\b', re.escape(function_name), r'\b'])  # '\b' ensures an exact match of the function name
         matches = re.finditer(pattern, content)  # finditer returns position info
         f.seek(0)  # back to the start of the file
         sources = []  # holds all matched sources .h (there may be more than one -headers function)
         if matches:  # whether the match succeeded
             find_h_file(matches, f, sources)
-        print("where", sources)
+        f.close()
     return matches, sources
 
 
@@ -49,7 +67,7 @@ def find_dest_dir(matches, content, f):
     if matches:
         end = 0
         for _ in matches:
-            pattern = r'dest_dir\s*=\s*"([^"]*)"'
+            pattern = RegularExpressions.DEST_DIR.value
             source_match = re.search(pattern, content)
             if source_match:
                 con = source_match.group(1)
@@ -63,10 +81,11 @@ def find_dest_dir(matches, content, f):
 def get_dest_dir(file, function_name):  # get dest_dir
     with open(file, 'r') as f:
         content = f.read()  # read the file content
-        pattern = r'\b' + re.escape(function_name) + r'\b'  # '\b' ensures an exact match of the function name
+        pattern = ''.join([r'\b', re.escape(function_name), r'\b'])  # '\b' ensures an exact match of the function name
         matches = re.findall(pattern, content)
         f.seek(0)
         sources_dir = find_dest_dir(matches, content, f)
+        f.close()
     return sources_dir
 
 
@@ -78,8 +97,6 @@ def find_json_file(gn_file_match):  # find .json files in the gn file's directory
     return match_json_file
 
 
-# def get_
-
-
 def dire_func(gn_file, func_name):  # gathers statistics
     matches_file_total = []  # gn files that contain the ohos_ndk_headers function
     json_file_total = []  # json files that sit next to those gn files
@@ -98,7 +115,7 @@ def change_json_file(dict_data, name):  # generate a json file
     with open(file_name, 'w', encoding='UTF-8') as f:  # encoding='UTF-8' keeps Chinese readable
         # ensure_ascii=False keeps Chinese readable; indent=4 formats the json like the dict
         json.dump(dict_data, f, ensure_ascii=False, indent=4)
+        f.close()
     return file_name
 
 
@@ -131,6 +148,7 @@ def get_result_table(json_files, abs_path, link_path, gn_path):  # do the processing
     head_name = ""
     only_file1 = []
     only_file2 = []
+    data = []
     if json_files:
         file_name = os.path.split(json_files[0])  # take the first json name (they are kept in a list)
         file_name = os.path.splitext(file_name[1])  # take the element at index 1 (a tuple)
@@ -139,7 +157,10 @@ def get_result_table(json_files, abs_path, link_path, gn_path):  # do the processing
         # after parsing, pass the two json files, compare them, and generate the data table
         result_list, head_name, only_file1, only_file2 = generating_tables.get_json_file(parse_json_name,
                                                                                          json_files)
-    return result_list, head_name, only_file1, only_file2
+
+    obj_data = ParserGetResultTable(result_list, head_name, only_file1, only_file2, data)
+
+    return obj_data
 
 
 def create_dir(sources_dir, gn_file, function_name, link_include_file):
@@ -160,15 +181,19 @@ def create_dir(sources_dir, gn_file, function_name, link_include_file):
             match_files, json_files, include_files = dire_func(gn_file, function_name)
             dire_path = os.path.dirname(gn_file)  # get the gn file's directory
             if match_files:
-                abs_path = change_abs(include_files, dire_path)  # receive the .h absolute paths
-                for j_item in abs_path:
-                    shutil.copy(j_item, new_dire)
+                dir_copy(include_files, dire_path, new_dire)
             else:
                 print("在create_dir函数中,原因:gn文件条件不满足")
         else:
             print("gn文件没有ohos_sdk_headers")
 
 
+def dir_copy(include_files, dire_path, new_dire):
+    abs_path = change_abs(include_files, dire_path)  # receive the .h absolute paths
+    for j_item in abs_path:
+        shutil.copy(j_item, new_dire)
+
+
 def link_include(directory_path, function_names, link_include_file):
     gn_file_total = find_gn_file(directory_path)  # find gn files
     for item in gn_file_total:  # process each gn file
@@ -182,6 +207,7 @@ def main_entrance(directory_path, function_names, link_path):  # main entry
     result_list_total = []
     only_file1_total = []
     only_file2_total = []
+    data_total = []  # all parsed data
     for item in gn_file_total:  # process each gn file
         match_files, json_files, include_files = dire_func(item, function_names)
         dire_path = os.path.dirname(item)  # get the gn file's directory
@@ -191,32 +217,38 @@ def main_entrance(directory_path, function_names, link_path):  # main entry
         if include_files:  # gn files that meet the conditions
             abs_path = change_abs(include_files, dire_path)  # receive the .h absolute paths
             print("头文件绝对路径:\n", abs_path)
-            result_list, head_name, only_file1, only_file2 = get_result_table(json_files, abs_path,
-                                                                              link_path, dire_path)  # receive the comparison result
-            if len(result_list) != 0:
-                result_list_total.extend(result_list)
-                only_file1_total.extend(only_file1)
-                only_file2_total.extend(only_file2)
-            elif head_name == "":
+            # receive the comparison result
+            data_result = get_result_table(json_files, abs_path, link_path, dire_path)
+            data_total.append(data_result.data)
+            if len(data_result.result_list) != 0:
+                result_list_total.extend(data_result.result_list)
+                only_file1_total.extend(data_result.only_file1)
+                only_file2_total.extend(data_result.only_file2)
+            elif data_result.head_name == "":
                 print("gn文件下无json文件")
             else:
-                generating_tables.generate_excel(result_list, head_name, only_file1, only_file2)
+                generating_tables.generate_excel(data_result.result_list, data_result.head_name,
+                                                 data_result.only_file1, data_result.only_file2)
                 print("没有匹配项")
         else:
            print("gn文件无header函数")
-    head_name = "result_total.xlsx"  # the overall result table
-    generating_tables.generate_excel(result_list_total, head_name, only_file1_total, only_file2_total)
+    generating_tables.generate_excel(result_list_total, StringConstant.RESULT_HEAD_NAME.value,
+                                     only_file1_total, only_file2_total)
+
+    obj_data_total = ParserGetResultTable(result_list_total, '', only_file1_total,
+                                          only_file2_total, data_total)
+    return obj_data_total
 
 
 def copy_std_lib(link_include_file):
-    std_include = r'sysroot\ndk_musl_include_files'
+    std_include = StringConstant.STD_INCLUDE.value
     if not os.path.exists(std_include):
         shutil.copytree(StringConstant.INCLUDE_LIB.value, std_include)
     link_include_file.append(std_include)
 
 
 def find_include(link_include_path):
-    for dir_path, _, _ in os.walk('sysroot\\$ndk_headers_out_dir'):
+    for dir_path, _, _ in os.walk(RegularExpressions.CREATE_LIB_PATH.value):
         link_include_path.append(dir_path)
 
 
@@ -228,10 +260,11 @@ def parser(directory_path):  # directory path
     find_include(link_include_path)
     link_include(directory_path, function_name, link_include_path)
 
-    main_entrance(directory_path, function_name, link_include_path)  # call the entry function
+    data_total = main_entrance(directory_path, function_name, link_include_path)  # call the entry function
+    return data_total
 
 
-def parser_include_ast(gn_file_path, include_path):
+def parser_include_ast(gn_file_path, include_path):  # parse interface for standalone .h files
     link_path = [StringConstant.INCLUDE_LIB.value]
     data = parse_include.get_include_file(include_path, link_path, gn_file_path)
     return data
build-tools/capi_parser/src/main.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# coding=utf-8
+##############################################
+# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################
+
+import argparse
+from bin import config
+
+
+def main_function():
+    parser = argparse.ArgumentParser(
+        prog=config.Config.name, description=config.Config.description)
+    for command in config.Config.commands:
+        arg_abbr = command.get("abbr")
+        arg_name = command.get("name")
+        arg_choices = command.get("choices")
+        arg_required = (True if command.get("required") else False)
+        arg_type = command.get("type")
+        default = command.get("default")
+        arg_help = command.get("help")
+        parser.add_argument(arg_abbr, arg_name, choices=arg_choices,
+                            required=arg_required, type=arg_type, default=default, help=arg_help)
+
+    config.run_tools(parser.parse_args())
+
+
+if __name__ == '__main__':
+    main_function()
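
Note: main_function builds its CLI from a declarative command table rather than hard-coded add_argument calls. A self-contained sketch with a stand-in table follows; the real table lives in bin/config.py and is not shown in this commit, so the entries below are hypothetical.

# Table-driven argparse setup, mirroring main_function with a made-up table.
import argparse

commands = [
    {"abbr": "-N", "name": "--tool-name", "choices": ["collect", "diff"],
     "required": True, "type": str, "default": None, "help": "tool to run"},
]

parser = argparse.ArgumentParser(prog="parser", description="demo CLI")
for command in commands:
    parser.add_argument(command.get("abbr"), command.get("name"),
                        choices=command.get("choices"),
                        required=True if command.get("required") else False,
                        type=command.get("type"),
                        default=command.get("default"),
                        help=command.get("help"))

print(parser.parse_args(["-N", "collect"]).tool_name)  # collect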
@@ -15,6 +15,7 @@
 
 import enum
 
+
 class TAGS(enum.Enum):
     ADD_TO_GROUP = 'addtogroup'
     BRIEF = 'brief'
@@ -87,18 +88,25 @@ class ErrorMessage(enum.Enum):
     USE_UPPER_TAG = "the [$$] tag is upper. Should use lower: [$$]"
     REPEAT_FILE_TAG = 'the [$$] tag is repeat. Please check the tag in file'
     ERROR_INFO_VALUE_TAG = 'the [$$] tag value is incorrect. Please check the usage method'
-    ERROR_INFO_VALUE_LIBRARY = 'the [library] tag value is incorrect. This tag must be end with .so or .a, or is NA. Please check the usage method'
-    ERROR_INFO_VALUE_PARAM = 'the value of the [$$] [param] tag is incorrect. Please check if it matches the [$$] parameter name'
+    ERROR_INFO_VALUE_LIBRARY = 'the [library] tag value is incorrect. This tag must be end with .so or .a, \
+or is NA. Please check the usage method'
+    ERROR_INFO_VALUE_PARAM = 'the value of the [$$] [param] tag is incorrect. Please check if it matches \
+the [$$] parameter name'
     ERROR_INFO_COUNT_PARAM = 'the count of the [param] tag is wrong. Please check the parameters and Doc'
-    ERROR_INFO_VALUE_PERMISSION = 'the [permission] tag value is incorrect. Please check if the permission field has been configured or update the configuration file'
+    ERROR_INFO_VALUE_PERMISSION = 'the [permission] tag value is incorrect. Please check if the permission \
+field has been configured or update the configuration file'
     ERROR_INFO_VALUE_SINCE = 'the [since] tag value is incorrect. Please check if the tag value is a numerical value'
     ERROR_INFO_VALUE_SYSCAP = 'the [syscap] tag value is incorrect. Please check if the syscap field is configured'
-    ERROR_USE_LEFT_BRACE = 'the validity verification of the Doc tag failed. The [{] tag is not allowed to used in Doc which not has addtogroup tag, or used in the wrong place.'
-    ERROR_REPEAT_LEFT_BRACE = 'the validity verification of the Doc tag failed. The [{] tag is not allowed to reuse in Doc which has addtogroup tag.'
-    ERROR_USE_RIGHT_BRACE = 'the validity verification of the JSDoc tag failed. The [}] tag is not allowed to be reused please delete the extra tags.'
+    ERROR_USE_LEFT_BRACE = 'the validity verification of the Doc tag failed. The [{] tag is not allowed to used \
+in Doc which not has addtogroup tag, or used in the wrong place.'
+    ERROR_REPEAT_LEFT_BRACE = 'the validity verification of the Doc tag failed. The [{] tag is not allowed to \
+reuse in Doc which has addtogroup tag.'
+    ERROR_USE_RIGHT_BRACE = 'the validity verification of the JSDoc tag failed. The [}] tag is not allowed to \
+be reused please delete the extra tags.'
     ERROR_FILE_HAS_ONE_LOSE_OTHER = 'the file has the $$, but do not has the $$.'
     ERROR_FILE_LOSE_ONE = 'the file missing $$'
-    FUNCTION_DECL = 'Function naming should use the big hump naming style or beginning with OH/OS,and using "_" segmentation.'
+    FUNCTION_DECL = 'Function naming should use the big hump naming style or beginning with OH/OS,and \
+using "_" segmentation.'
     STRUCT_DECL = 'Structure type naming should use the big hump naming style.'
     ENUM_DECL = 'Enum type naming should use the big hump naming style.'
     UNION_DECL = 'Consortium type naming should use the big hump naming style.'
@@ -106,10 +114,12 @@ class ErrorMessage(enum.Enum):
     PARM_DECL = 'Function parameters naming should use the small hump naming style.'
     MACRO_PARAMETERS_TYPE_NAMING_ERROR = 'Macro parameters naming should use the small hump naming style.'
     FIELD_DECL = 'Fields in the structure naming should use the small hump naming style.'
-    MEMBERS_OF_THE_CONSORTIUM_TYPE_NAMING_ERROR = 'Members of the consortium naming should use the small hump naming style.'
+    MEMBERS_OF_THE_CONSORTIUM_TYPE_NAMING_ERROR = 'Members of the consortium naming should use the \
+small hump naming style.'
     MACRO_DEFINITION = 'Macro naming should use all uppercase, separated by underscores naming style.'
     ENUM_CONSTANT_DECL = 'Enum value naming should use all uppercase, separated by underscores naming style.'
-    GOTO_LABEL_TYPE_NAMING_ERROR = 'Goto label value naming should use all uppercase, separated by underscores naming style.'
+    GOTO_LABEL_TYPE_NAMING_ERROR = 'Goto label value naming should use all uppercase, separated by \
+underscores naming style.'
     GLOBAL_VARIABLE_TYPE_NAMING_ERROR = 'Global variable should increase "g_" prefix.'
     TRANSLATION_UNIT = 'File naming should be all lowercase, separated by underscores.'
 
@@ -118,139 +128,139 @@ class OutputTxt:
     id = -1
     level = -1
     location = ''
-    filePath = ''
+    file_path = ''
     message = ''
 
-    def __init__(self, id, level, location, file_path, message):
-        self.id = id
-        self.level = level
-        self.location = location
-        self.filePath = file_path
-        self.message = message
+    def __init__(self, id_param, level_param, location_param, file_path_param, message_param):
+        self.id = id_param
+        self.level = level_param
+        self.location = location_param
+        self.file_path = file_path_param
+        self.message = message_param
 
     def get_id(self):
         return self.id
 
-    def set_id(self, id):
-        self.id = id
+    def set_id(self, id_param):
+        self.id = id_param
 
     def get_level(self):
         return self.level
 
-    def set_level(self, level):
-        self.level = level
+    def set_level(self, level_param):
+        self.level = level_param
 
     def get_location(self):
         return self.location
 
-    def set_location(self, location):
-        self.location = location
+    def set_location(self, location_param):
+        self.location = location_param
 
     def get_file_path(self):
-        return self.filePath
+        return self.file_path
 
-    def set_file_path(self, file_path):
-        self.filePath = file_path
+    def set_file_path(self, file_path_param):
+        self.file_path = file_path_param
 
     def get_message(self):
         return self.message
 
-    def set_message(self, message):
-        self.message = message
+    def set_message(self, message_param):
+        self.message = message_param
 
 
 class ApiResultInfo:
-    errorType: ErrorType = ErrorType.DEFAULT.value
-    errorInfo = ''
+    error_type: ErrorType = ErrorType.DEFAULT.value
+    error_info = ''
     level: ErrorLevel = -1
-    apiName = ''
-    apiFullText = ''
-    fileName = ''
+    api_name = ''
+    api_full_text = ''
+    file_name = ''
     location = ''
-    locationLine = -1
-    locationColumn = -1
+    location_line = -1
+    location_column = -1
     type: LogType = LogType.DEFAULT.value
     version = -1
     basename = ''
 
-    def __init__(self, error_type=None, error_info='', api_name=''):
-        if error_type is None:
-            error_type = ErrorType.DEFAULT.value
-        self.errorType = error_type
-        self.errorInfo = error_info
-        self.apiName = api_name
+    def __init__(self, error_type_param=None, error_info_param='', api_name_param=''):
+        if error_type_param is None:
+            error_type_param = ErrorType.DEFAULT.value
+        self.error_type = error_type_param
+        self.error_info = error_info_param
+        self.api_name = api_name_param
 
     def get_error_type(self):
-        return self.errorType
+        return self.error_type
 
-    def set_error_type(self, error_type):
-        self.errorType = error_type
+    def set_error_type(self, error_type_param):
+        self.error_type = error_type_param
 
     def get_file_name(self):
-        return self.fileName
+        return self.file_name
 
-    def set_file_name(self, file_name):
-        self.fileName = file_name
+    def set_file_name(self, file_name_param):
+        self.file_name = file_name_param
 
     def get_type(self):
         return self.type
 
-    def set_type(self, type):
-        self.type = type
+    def set_type(self, type_param):
+        self.type = type_param
 
     def get_error_info(self):
-        return self.errorInfo
+        return self.error_info
 
-    def set_error_info(self, error_info):
-        self.errorInfo = error_info
+    def set_error_info(self, error_info_param):
+        self.error_info = error_info_param
 
     def get_version(self):
         return self.version
 
-    def set_version(self, version):
-        self.version = version
+    def set_version(self, version_param):
+        self.version = version_param
 
     def get_basename(self):
         return self.basename
 
-    def set_basename(self, basename):
-        self.basename = basename
+    def set_basename(self, basename_param):
+        self.basename = basename_param
 
     def get_level(self):
         return self.level
 
-    def set_level(self, level):
-        self.level = level
+    def set_level(self, level_param):
+        self.level = level_param
 
     def get_api_name(self):
-        return self.apiName
+        return self.api_name
 
-    def set_api_name(self, api_name):
-        self.apiName = api_name
+    def set_api_name(self, api_name_param):
+        self.api_name = api_name_param
 
     def get_api_full_text(self):
-        return self.apiFullText
+        return self.api_full_text
 
-    def set_api_full_text(self, api_full_text):
-        self.apiFullText = api_full_text
+    def set_api_full_text(self, api_full_text_param):
+        self.api_full_text = api_full_text_param
 
     def get_location_line(self):
-        return self.locationLine
+        return self.location_line
 
-    def set_location_line(self, location_line):
-        self.locationLine = location_line
+    def set_location_line(self, location_line_param):
+        self.location_line = location_line_param
 
     def get_location_column(self):
-        return self.locationColumn
+        return self.location_column
 
-    def set_location_column(self, location_column):
-        self.locationColumn = location_column
+    def set_location_column(self, location_column_param):
+        self.location_column = location_column_param
 
     def get_location(self):
         return self.location
 
-    def set_location(self, location):
-        self.location = location
+    def set_location(self, location_param):
+        self.location = location_param
 
 
 class DocInfo:
build-tools/capi_parser/src/typedef/parser/parser.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+class ParserGetResultTable:
+    result_list = []
+    head_name = ""
+    only_file1 = []
+    only_file2 = []
+    data = []
+
+    def __init__(self, result_list_need, head_name_need, only_file1_need, only_file2_need, data_need):
+        self.result_list = result_list_need
+        self.head_name = head_name_need
+        self.only_file1 = only_file1_need
+        self.only_file2 = only_file2_need
+        self.data = data_need
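
Note: ParserGetResultTable replaces the old four-element tuple return from get_result_table, so call sites read named fields instead of unpacking positionally. A usage sketch with placeholder values, assuming the module above is on the import path:

# Named-field access instead of positional tuple unpacking.
from typedef.parser.parser import ParserGetResultTable

result = ParserGetResultTable(result_list_need=[('diff', 'a.h')],
                              head_name_need='result_total.xlsx',
                              only_file1_need=[], only_file2_need=[],
                              data_need=[])
if len(result.result_list) != 0:
    print(result.head_name, len(result.result_list))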
build-tools/capi_parser/src/utils/constants.py (new file, 37 lines)
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# coding=utf-8
+##############################################
+# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################
+
+import enum
+
+
+class StringConstant(enum.Enum):
+    LIB_CLG_PATH = r'D:\Environment\LLVM\bin\libclang.dll'  # shared library
+    FUNK_NAME = "ohos_ndk_headers"
+    REPLACE_WAREHOUSE = '\\interface_sdk_c\\interface_sdk_c'  # path of the repo pulled locally (drive letter stripped)
+    # absolute path of the third-party library pulled into the local repo
+    INCLUDE_LIB = r'.\third_party\musl\ndk_musl_include'
+    STD_INCLUDE = r'.\sysroot\ndk_musl_include_files'
+    RESULT_HEAD_NAME = "result_total.xlsx"
+
+
+class RegularExpressions(enum.Enum):
+    START_COMMENT = r'/\*\*(.*?)\*/'
+    END_COMMENT = r'/\*\* @} \*/'
+    SOURCES = r'sources\s*=\s*\[[^\]]*\]'
+    DEST_DIR = r'dest_dir\s*=\s*"([^"]*)"'
+    INCLUDE_H = r'"([^"]+h)"'
+    CREATE_LIB_PATH = r'sysroot\$ndk_headers_out_dir'
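
Note: both enums wrap raw strings, so consumers must read .value before passing a member to re or os functions; RegularExpressions.DEST_DIR itself is an enum member, not a pattern. A minimal, self-contained sketch:

# Enum members wrap the raw pattern; .value unwraps it for re.search.
import enum
import re


class RegularExpressions(enum.Enum):
    DEST_DIR = r'dest_dir\s*=\s*"([^"]*)"'


content = 'dest_dir = "$ndk_headers_out_dir/demo"'
match = re.search(RegularExpressions.DEST_DIR.value, content)
print(match.group(1))  # $ndk_headers_out_dir/demo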
@@ -1,12 +1,25 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
+# Copyright (c) 2023 Huawei Device Co., Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 
 import sys
 import os
 import json
 import unittest
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../src")))
-from coreImpl.check.check import get_check_result, write_in_txt
+from coreImpl.check.check import get_check_result_list, write_in_txt
 
 
 class TestMethods(unittest.TestCase):
@@ -18,7 +31,7 @@ class TestMethods(unittest.TestCase):
         for dirpath, dirnames, filenames in os.walk(test_case_path):
             for item in filenames:
                 file_name = item.split('.')[0]
-                check_result = get_check_result([os.path.join(dirpath, item)])
+                check_result = get_check_result_list([os.path.join(dirpath, item)])
                 write_in_txt(check_result, os.path.join(output_path, "{}.txt".format(file_name)))
                 with open(os.path.join(expect_path, "{}.txt".format(file_name))) as json_file:
                     permission_file_content = json.load(json_file)
@@ -1,22 +0,0 @@
-import argparse
-from bin import config
-
-
-def main_function():
-    parser = argparse.ArgumentParser(
-        prog=config.Config.name, description=config.Config.description)
-    for command in config.Config.commands:
-        arg_abbr = command.get("abbr")
-        arg_name = command.get("name")
-        arg_choices = command.get("choices")
-        arg_required = (True if command.get("required") else False)
-        arg_type = command.get("type")
-        default = command.get("default")
-        arg_help = command.get("help")
-        parser.add_argument(arg_abbr, arg_name, choices=arg_choices,
-                            required=arg_required, type=arg_type, default=default, help=arg_help)
-
-    config.run_tools(parser.parse_args())
-
-
-main_function()
@@ -1,9 +0,0 @@
-import enum
-
-
-class StringConstant(enum.Enum):
-    LIB_CLG_PATH = r'D:\Environment\LLVM\bin\libclang.dll'  # shared library
-    FUNK_NAME = "ohos_ndk_headers"
-    REPLACE_WAREHOUSE = '\\interface_sdk_c\\interface_sdk_c'  # path of the repo pulled locally (drive letter stripped)
-    # absolute path of the third-party library pulled into the local repo
-    INCLUDE_LIB = r'third_party\musl\ndk_musl_include'