Mirror of https://gitee.com/openharmony/interface_sdk_c, synced 2024-12-02 12:06:59 +00:00
Commit 480c927982
@@ -1,3 +1,24 @@

1. Before using this tool, modify StringConstant.LIB_CLANG_PATH and StringConstant.LINK_INCLUDE_PATH in the [constants.py](./src/utils/constants.py) file;

StringConstant.LIB_CLANG_PATH: the clang shared library

StringConstant.LINK_INCLUDE_PATH: the directory of header file paths that need to be included

1. Before using this tool, modify StringConstant.LIB_CLANG_PATH and StringConstant.REPLACE_WAREHOUSE in the [constants.py](./src/utils/constants.py) file;

StringConstant.LIB_CLANG_PATH: the libclang.dll shared library (local)

REPLACE_WAREHOUSE: the directory of the locally cloned interface_sdk_c repository (local path) -- for example (with the drive prefix removed): \\interface_sdk_c


2. Environment:

1) python-3.11.4-amd64

2) PyCharm Community Edition 2023.2

3) Install the libraries listed in [requirements.txt](./requirements.txt) -- install command: pip install -r <path to the txt file>

4) Mark the src directory as sources root (right-click the src directory and choose it under "Mark Directory as")

5) Run the main.py file under the src directory

3. Terminal commands

options:
  -h, --help            show this help message and exit
  -N {collect,diff}, --tool-name {collect,diff}
                        Tool name
  -P PARSER_PATH, --parser-path PARSER_PATH
                        Path to parse

For example, to use the statistics tool, run the terminal command: py -N collect -P <directory path> -- this invokes the C API statistics tool
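For reference, both values live in src/utils/constants.py. Judging from the constants.py hunk at the end of this commit, the entries to adjust look roughly like this (the libclang path below is a machine-specific example, not a required value):

    import enum

    class StringConstant(enum.Enum):
        LIB_CLANG_PATH = r'D:\Environment\LLVM\bin\libclang.dll'  # local libclang shared library
        FUNK_NAME = "ohos_ndk_headers"                            # GN function name the parser matches
        REPLACE_WAREHOUSE = '\\interface_sdk_c\\interface_sdk_c'  # local interface_sdk_c clone, drive prefix stripped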
@@ -3,7 +3,7 @@ from coreImpl.parser import parser


class ToolNameType(enum.Enum):
    COOLLECT = 'collect'
    COLLECT = 'collect'
    DIFF = 'diff'
@@ -22,7 +22,7 @@ formatSet = [member.value for name,


def run_tools(options):
    tool_name = options.tool_name
    if tool_name == ToolNameType["COOLLECT"].value:
    if tool_name == ToolNameType["COLLECT"].value:
        parser.parser(options.parser_path)
    elif tool_name == ToolNameType["DIFF"].value:
        print("开发中。。。")
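Put differently, the dispatch above routes the collect option straight into the parser module; a minimal equivalent of the README's terminal command, with a placeholder directory path:

    from coreImpl.parser import parser
    parser.parser(r'D:\interface_sdk_c')  # same effect as: py -N collect -P D:\interface_sdk_c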
@@ -34,5 +34,5 @@ class Config(object):

    name = 'parser'
    version = '0.1.0'
    description = 'Compare the parser the NDKS'
    commands = [{"name": "--tool-name", "abbr": "-N", "required": True, "choices": toolNameTypeSet, "type": str, "default": ToolNameType["COOLLECT"], "help": "工具名称"},
    commands = [{"name": "--tool-name", "abbr": "-N", "required": True, "choices": toolNameTypeSet, "type": str, "default": ToolNameType["COLLECT"], "help": "工具名称"},
                {"name": "--parser-path", "abbr": "-P", "required": True, "type": str, "help": "解析路径"}]
@@ -9,15 +9,38 @@ def compare_json_file(js_file1, js_file2):

    with open(js_file2, 'r') as js2:
        data2 = json.load(js2)
    compare_result = []
    only_file1 = []  # holds the items unique to file1
    result_api = filter_compare(data1)
    for i in range(len(result_api)):
        name1 = result_api[i]["name"]
        key = 0
        for item in data2:
            if item["name"]:
                name2 = item["name"]
                if name1 == name2:
                    key = 1
                    compare_result.append(result_api[i])
                    return compare_result
                    break
        if key == 0:
            only_file1.append(result_api[i])
    only_file2 = get_difference_data(compare_result, data2)  # get the items unique to file2

    return compare_result, only_file1, only_file2


def get_difference_data(compare_result, data2):
    only_file2 = []
    for item in data2:
        name2 = item["name"]
        key = 0
        for j in range(len(compare_result)):
            name1 = compare_result[j]["name"]
            if name2 == name1:
                key = 1
                break
        if key == 0:
            only_file2.append(item)
    return only_file2


def filter_compare(data1):  # collect the functions and variables
@@ -25,29 +48,58 @@ def filter_compare(data1):

    for i in range(len(data1)):
        for item1 in data1[i]["children"]:  # skip the root node
            if (item1["kind"] == 'FUNCTION_DECL' or item1["kind"] == 'VAR_DECL') and item1["is_extern"]:
                if item1["kind"] == 'FUNCTION_DECL':
                    del item1["parm"]  # drop the "parm" key-value pair; it is not needed
                del item1["is_extern"]  # drop the "is_extern" key-value pair; after filtering everything is extern
                result_api.append(item1)

                item = filter_func(item1)
                result_api.append(item)
    return result_api


def generate_excel(array, name):
    pf = pd.DataFrame.from_dict(array, orient='columns')  # convert the list into a DataFrame, reading the data column-wise (orient='columns')

def filter_func(item):
    del item["is_extern"]  # drop the "is_extern" key-value pair; after filtering everything is extern
    del item["comment"]
    item["location_path"] = item["location"]["location_path"]
    item["location"] = item["location"]["location_line"]
    if item["kind"] == 'FUNCTION_DECL':
        item["kind"] = '函数类型'
        parm = []  # holds the function parameters
        if "parm" in item:
            if item["parm"]:
                for i in range(len(item["parm"])):
                    if item["parm"][i]["kind"] != 'PARM_DECL':
                        continue
                    else:
                        str_parm = item["parm"][i]["type"] + ' ' + item["parm"][i]["name"]
                        parm.append(str_parm)
                item["parm"] = parm
    else:
        item["kind"] = '变量类型'
    return item

    order = ['name', 'kind', 'type', 'return_type']  # specify the column order
    pf = pf[order]


def generate_excel(array, name, only_file1, only_file2):
    pf = pd.DataFrame.from_dict(array, orient='columns')  # convert the list into a DataFrame, reading the data column-wise (orient='columns')
    pf1 = pd.DataFrame(only_file1)
    pf2 = pd.DataFrame(only_file2)
    columns_map = {  # rename the columns to their Chinese headers
        'name': '名称',
        'kind': '节点类型',
        'type': '类型',
        'gn_path': 'gn文件路径',
        'location_path': '文件相对路径',
        'location': '位置行',
        'return_type': '返回类型',
        'parm': '参数'
    }

    pf.rename(columns=columns_map, inplace=True)
    with pd.ExcelWriter(name) as writer:  # generate the spreadsheet
        pf.to_excel(writer, sheet_name='对比结果', index=False)
        pf1.to_excel(writer, sheet_name='生成json独有', index=False)
        pf2.to_excel(writer, sheet_name='原json独有', index=False)

    pf.to_excel(name, index=False)  # generate the spreadsheet


def increase_sheet(array, name, sheet):
    pf = pd.DataFrame(array)
    writer = pd.ExcelWriter(name, mode='a', engine='openpyxl', if_sheet_exists='new')
    pf.to_excel(writer, sheet_name=sheet, index=False)


def get_json_file(json_file_new, json_file):  # get the generated json file
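A rough sketch of how the comparison and table generation above are meant to be chained, assuming both json files hold lists of API records keyed by "name" (the file names here are hypothetical):

    result, only_generated, only_original = compare_json_file('parsed_api.json', 'original_api.json')
    generate_excel(result, 'compare_result.xlsx', only_generated, only_original)  # writes the 对比结果, 生成json独有 and 原json独有 sheets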
@@ -56,9 +108,9 @@ def get_json_file(json_file_new, json_file):

    head_name = os.path.splitext(json_file1)  # strip the file name extension
    head_name = head_name[0] + '.xlsx'  # add the new extension
    result_list = []
    only_file1 = []
    only_file2 = []
    for i in range(len(json_file2)):  # compare against every json file under the directory
        result_list = compare_json_file(json_file1, json_file2[i])  # compare the two json files

    return result_list, head_name  # return the comparison data and the required table name

        result_list, only_file1, only_file2 = compare_json_file(json_file1, json_file2[i])  # compare the two json files

    return result_list, head_name, only_file1, only_file2  # return the comparison data and the required table name
@@ -4,8 +4,7 @@ from clang.cindex import Config  # configuration

from clang.cindex import Index  # main API
from clang.cindex import CursorKind  # the category of an index node
from clang.cindex import TypeKind  # the semantic category of a node

import json
import os


def find_parent(cursor):  # get the parent node
@@ -14,7 +13,7 @@ def find_parent(cursor):  # get the parent node

    if cursor_parent.kind == CursorKind.VAR_DECL:  # the parent node is VAR_DECL, used for integer variable nodes
        return cursor_parent.kind

    if cursor_parent.kind == CursorKind.STRUCT_DECL or cursor_parent.kind == CursorKind.UNION_DECL:  # used to decide which category the inner members belong to
    elif cursor_parent.kind == CursorKind.STRUCT_DECL or cursor_parent.kind == CursorKind.UNION_DECL:  # used to decide which category the inner members belong to
        return cursor_parent.kind
    else:
        parent = cursor_parent.semantic_parent
@@ -25,7 +24,7 @@ def find_parent(cursor):  # get the parent node


def processing_no_child(cursor, data):  # handle nodes that have no child nodes
    if cursor.kind.name == CursorKind.INTEGER_LITERAL:  # integer literal node, has no children
    if cursor.kind == CursorKind.INTEGER_LITERAL:  # integer literal node, has no children
        parent_kind = find_parent(cursor)  # decide which category it belongs to
        if parent_kind == CursorKind.STRUCT_DECL:
            data["name"] = 'struct_int_no_spelling'
@@ -46,7 +45,10 @@ def processing_complex_def(tokens, data):  # handle compound macros

    tokens_new = tokens[1:]  # skip the normal macro name
    logo_com = 0  # marks a compound macro; the compound text also depends on this
    count_com = 0
    count_token = len(tokens_new)  # value ()
    for token in tokens_new:
        if token.kind.name == 'KEYWORD':
            break
        if token.kind.name == 'IDENTIFIER':
            count = 1
            logo = 0
@@ -56,7 +58,9 @@ def processing_complex_def(tokens, data):  # handle compound macros

                    break
                else:
                    count += 1
            if logo == 1:  # get the macro name of the compound macro definition
            if count_token == count:
                pass
            elif logo == 1:  # get the macro name of the compound macro definition
                logo_com = logo
                count_com = count + 1
                tokens_name = tokens[:count + 1]
@@ -79,12 +83,32 @@ def get_def_text(tokens, data, logo_compose, count_compose):

    pass


def judgment_extern(cursor, data):  # check whether it carries extern
    is_extern = False

def get_token(cursor):
    tokens = []
    for token in cursor.get_tokens():
        if token.spelling == 'extern':
            tokens.append(token.spelling)

    return tokens


def judgment_extern(cursor, data):  # check whether it carries extern
    is_extern = None
    tokens = get_token(cursor)
    if cursor.kind == CursorKind.FUNCTION_DECL:
        if 'static' in tokens:
            is_extern = False
        # elif 'deprecated' in tokens and ('attribute' in tokens or '__declspec' in tokens):
        elif 'deprecated' in tokens:
            is_extern = False
        else:
            is_extern = True
            break
    elif cursor.kind == CursorKind.VAR_DECL:
        if 'extern' in tokens:
            is_extern = True
        else:
            is_extern = False
    else:
        is_extern = True
    if is_extern:
        data["is_extern"] = is_extern
    else:
@@ -175,30 +199,39 @@ special_node_process = {

}


def processing_special_node(cursor, data):  # handle nodes that need special treatment
def processing_special_node(cursor, data, gn_path=None):  # handle nodes that need special treatment
    loc = {
        "location_path": '{}'.format(cursor.location.file.name),
        "location_line": cursor.location.line,
        "location_column": cursor.location.column
    }
    relative_path = os.path.relpath(cursor.location.file.name, gn_path)  # get the header file's relative path
    loc["location_path"] = relative_path
    data["location"] = loc
    if cursor.kind.name in special_node_process.keys():
        node_process = special_node_process[cursor.kind.name]
        node_process(cursor, data)  # call the handler for the corresponding node


def ast_to_dict(cursor, comment=None):  # organize the parsed data
def ast_to_dict(cursor, current_file, gn_path=None, comment=None):  # organize the parsed data
    data = {  # common fields
        "name": cursor.spelling,
        "kind": cursor.kind.name,
        "type": cursor.type.spelling,
        "gn_path": gn_path
    }

    if cursor.raw_comment:  # take the comment information if there is any, otherwise skip it
        data["comment"] = cursor.raw_comment
    else:
        pass
        data["comment"] = 'none_comment'

    if cursor.kind == CursorKind.TRANSLATION_UNIT:  # put the file's leading comment on the root node, if there is one
        if comment:
            data["comment"] = comment[0]

    else:
        processing_special_node(cursor, data)  # node handling
        processing_special_node(cursor, data, gn_path)  # node handling

    children = list(cursor.get_children())  # check for child nodes; if there are any, append "children", otherwise handle case by case
    if len(children) > 0:
@@ -211,15 +244,18 @@ def ast_to_dict(cursor, comment=None):  # organize the parsed data

        data[name] = []
        for child in children:
            if child.location.file is not None and child.kind != CursorKind.UNEXPOSED_ATTR:  # drop redundant macro definitions and skip UNEXPOSED_ATTR nodes
                child_data = ast_to_dict(child)
                data[name].append(child_data)
                if child.location.file.name == current_file:
                    child_data = ast_to_dict(child, current_file, gn_path)
                    data[name].append(child_data)
                else:
                    pass
    else:
        processing_no_child(cursor, data)  # handle nodes without child nodes
    return data


def preorder_travers_ast(cursor, total, comment):  # collect the attributes
    ast_dict = ast_to_dict(cursor, comment)  # get the node attributes
def preorder_travers_ast(cursor, total, comment, current_file, gn_path=None):  # collect the attributes
    ast_dict = ast_to_dict(cursor, current_file, gn_path, comment)  # get the node attributes
    total.append(ast_dict)  # append it to the statistics list
@@ -232,12 +268,11 @@ def get_start_comments(include_path):  # get the comment at the very start of each header file

    if matches is None:
        pattern = r'/\*[^/]*\*/\s*(?=#ifndef)'
        matches = re.findall(pattern, content, re.DOTALL | re.MULTILINE)
        return matches
    else:
        return None

    return matches


def api_entrance(share_lib, include_path=None, link_path=None):  # statistics entry point
def api_entrance(share_lib, include_path, gn_path, link_path=None):  # statistics entry point
    # clang.cindex needs the libclang.dll shared library, so configure the shared library
    if Config.loaded:
        print("config.loaded == true")
@@ -250,13 +285,12 @@ def api_entrance(share_lib, include_path=None, link_path=None):  # statistics entry point

    print('=' * 50)
    # with options set as below, macro definition data is parsed as well
    args = ['-I{}'.format(path) for path in link_path]
    args.append('-std=c99')
    options = clang.cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD
    print(args)

    data_total = []  # list object used for the statistics
    for i in range(len(include_path)):  # process every header file
        file = r'{}'.format(include_path[i])
        print('文件名:{}'.format(file))
        tu = index.parse(include_path[i], args=args, options=options)
        print(tu)
        print('=' * 50)
@@ -264,20 +298,20 @@

        print(ast_root_node)
        matches = get_start_comments(include_path[i])  # receive the comment at the very start of the file
        # pre-order traversal of the AST
        preorder_travers_ast(ast_root_node, data_total, matches)  # call the handler
        preorder_travers_ast(ast_root_node, data_total, matches, include_path[i], gn_path)  # call the handler
        print('=' * 50)

    return data_total


def get_include_file(libclang, include_file_path, link_path):  # library path, .h file paths, linked header file paths
def get_include_file(libclang, include_file_path, link_path, gn_path=None):  # library path, .h file paths, linked header file paths
    # path of the libclang.dll library
    libclang_path = libclang
    # paths of the C header files
    file_path = include_file_path

    # linked header file paths
    link_include_path = link_path  # can be passed in as a list
    data = api_entrance(libclang_path, file_path, link_include_path)  # call the interface
    data = api_entrance(libclang_path, file_path, gn_path, link_include_path)  # call the interface

    return data
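For readers unfamiliar with clang.cindex, the parsing flow that api_entrance wraps boils down to the pattern below; a minimal sketch in which the libclang path, the -I directory and the header path are all placeholders:

    from clang.cindex import Config, Index, TranslationUnit

    Config.set_library_file(r'D:\Environment\LLVM\bin\libclang.dll')  # point cindex at the local libclang
    index = Index.create()
    args = ['-Ipath/to/linked/include', '-std=c99']  # same style of -I arguments as api_entrance builds
    options = TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD  # keep macro definitions in the AST
    tu = index.parse('path/to/header.h', args=args, options=options)
    for node in tu.cursor.get_children():  # tu.cursor is the TRANSLATION_UNIT root node
        print(node.kind.name, node.spelling)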
@@ -1,6 +1,7 @@

import os  # used to operate on directories and files
import glob  # used to find files with a given suffix under a given directory
import re  # regular expression module, used to operate on file contents
import shutil  # copy files
from coreImpl.parser import parse_include, generating_tables  # import the parsing module and the result-table module
import json
from utils.constants import StringConstant
@@ -23,7 +24,8 @@ def find_function_file(file, function_name):

        f.seek(0)  # go back to the start of the file
        if len(matches):  # whether the match succeeded
            sources = []  # holds every matched sources .h entry (there may be more than one headers function)
            end = 0
            f.seek(0)
            end = 0  # records the cursor position
            for i in range(len(matches)):
                # sources\s*=\s*\[[^\]]*\] matches the content inside the square brackets (one or more non-']' characters); \s* matches zero or more whitespace characters
                pattern = r'sources\s*=\s*\[[^\]]*\]'
@@ -39,7 +41,32 @@ def find_function_file(file, function_name):

            content = f.read()  # read the file content from the current position, to avoid repeated matches
        return len(matches) > 0, sources
    else:
        return None, None  # the gn file has no matching function
    return None, None  # the gn file has no matching function


def get_dest_dir(file, function_name):  # get dest_dir
    with open(file, 'r') as f:
        content = f.read()  # get the file content
        pattern = r'\b' + re.escape(function_name) + r'\b'  # '\b' ensures an exact match of the function name
        matches = re.findall(pattern, content)
        f.seek(0)
        if matches:
            sources_dir = []
            f.seek(0)
            end = 0
            for i in range(len(matches)):
                pattern = r'dest_dir\s*=\s*"([^"]*)"'
                source_match = re.search(pattern, content)
                if source_match:
                    con = source_match.group(1)
                    con_real = con[1:]
                    sources_dir.append(con)
                    end += source_match.end()  # after each sources .h path is found, record the end position of the cursor
                    f.seek(end)  # move the cursor to that end position
                    content = f.read()
            return sources_dir
        else:
            return None
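To illustrate the dest_dir extraction above, here is a small self-contained example of the same regular expression applied to a made-up BUILD.gn fragment (the target name and paths are invented for the illustration):

    import re

    gn_fragment = '''
    ohos_ndk_headers("example_header") {
      dest_dir = "$ndk_headers_out_dir/example"
      sources = [ "./include/example.h" ]
    }
    '''

    match = re.search(r'dest_dir\s*=\s*"([^"]*)"', gn_fragment)
    if match:
        print(match.group(1))  # prints: $ndk_headers_out_dir/example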

def find_json_file(gn_file_match):  # find the .json files in the same directory as the gn file
@@ -50,12 +77,14 @@

    return match_json_file


# def get_

def dire_func(gn_file, func_name):  # gathers the statistics data
    matches_file_total = []  # gn files that contain the ohos_ndk_headers function
    json_file_total = []  # json files in the same directory as the gn files that contain the function
    source_include = []  # the .h files inside sources
    length, source = find_function_file(gn_file, func_name)  # find the gn files containing the function and the .json files in the same directory
    if length:  # make sure both are non-empty; source may be empty (because the gn file may not have the function name)
    if length:  # make sure both are non-empty; source may be empty
        source_include = source  # get the header file list
        matches_file_total.append(gn_file)  # record the gn file that matched the function (meaning it has the corresponding function and sources)
        json_file_total.extend(find_json_file(gn_file))  # find the json files
@@ -80,46 +109,77 @@ def change_abs(include_files, dire_path):

        if os.path.isabs(include_files[j]):  # if it is an absolute path, join it with the drive; otherwise join it with the gn directory path
            head = os.path.splitdrive(dire_path)  # get the windows drive path
            include_file = os.path.normpath(include_files[j])
            include_file = include_file.replace('\\\\', '\\')  # strip the double \\ from the absolute path
            include_file = os.path.join(head[0], include_file)  # join the drive and the path
            include_file = include_file.replace('\\\\interface\\sdk_c', StringConstant.REPLACE_WAREHOUSE.value)  # strip the double \\ from the absolute path and replace it with the interface_sdk_c directory
            if head:
                include_file = os.path.join(head[0], include_file)  # join the drive and the path
            abs_path.append(include_file)
        else:
            abs_path.append(os.path.join(dire_path, os.path.normpath(include_files[j])))
            relative_path = os.path.abspath(os.path.join(dire_path, os.path.normpath(include_files[j])))  # resolves ../ and .
            abs_path.append(relative_path)
    print("头文件绝对路径:\n", abs_path)
    print("=" * 50)
    return abs_path


def get_result_table(json_files, abs_path, lib_path, link_path):  # do the processing and generate the table
def get_result_table(json_files, abs_path, lib_path, link_path, gn_path):  # do the processing and generate the table
    if json_files:
        file_name = os.path.split(json_files[0])  # take the first json name (they are kept in a list)
        file_name = os.path.splitext(file_name[1])  # take the element at index 1 (a tuple)
        data = parse_include.get_include_file(lib_path, abs_path, link_path)  # get the data returned by the parser
        data = parse_include.get_include_file(lib_path, abs_path, link_path, gn_path)  # get the data returned by the parser
        parse_json_name = change_json_file(data, file_name[0])  # generate the json file
        result_list, head_name = generating_tables.get_json_file(parse_json_name, json_files)  # after parsing, pass the two json files, compare them, and finally generate the data table
        return result_list, head_name
        result_list, head_name, only_file1, only_file2 = generating_tables.get_json_file(parse_json_name, json_files)  # after parsing, pass the two json files, compare them, and finally generate the data table
        return result_list, head_name, only_file1, only_file2
    else:
        return None, None
def create_dir(sources_dir, gn_file, function_name, link_include_file):
    for i in range(len(sources_dir)):
        directory = sources_dir[i]
        new_dire = os.path.join('sysroot', directory)
        new_dire = os.path.normpath(new_dire)
        if not os.path.exists(new_dire):
            os.makedirs(new_dire)

        else:
            print("目录已存在")
        if new_dire in link_include_file:
            pass
        else:
            link_include_file.append(new_dire)  # add the linked header file directory
        match_files, json_files, include_files = dire_func(gn_file, function_name)
        dire_path = os.path.dirname(gn_file)  # get the gn file's directory
        if match_files:
            abs_path = change_abs(include_files, dire_path)  # receive the absolute paths of the .h files
            for j in range(len(abs_path)):
                shutil.copy(abs_path[j], new_dire)
        else:
            print("在create_dir函数中,原因:gn文件条件不满足")
def link_include(directory_path, function_names, link_include_file):
    gn_file_total = find_gn_file(directory_path)  # find the gn files
    for i in range(len(gn_file_total)):  # process each gn file
        sources_dir = get_dest_dir(gn_file_total[i], function_names)
        create_dir(sources_dir, gn_file_total[i], function_names, link_include_file)
def main_entrance(directory_path, function_names, lib_path, link_path):  # main entry point
    gn_file_total = find_gn_file(directory_path)  # find the gn files
    print("gn文件:", gn_file_total)

    for i in range(len(gn_file_total)):  # process each gn file
        match_files, json_files, include_files = dire_func(gn_file_total[i], function_names)
        dire_path = os.path.dirname(gn_file_total[i])  # get the gn file's directory

        print("目录路径: {}".format(dire_path))

        print("同级json文件:\n", json_files)
        print("头文件:\n", include_files)

        if match_files:  # gn files that meet the conditions
            abs_path = change_abs(include_files, dire_path)  # receive the absolute paths of the .h files
            result_list, head_name = get_result_table(json_files, abs_path, lib_path, link_path)  # receive the generated table information
            result_list, head_name, only_file1, only_file2 = get_result_table(json_files, abs_path, lib_path, link_path, dire_path)  # receive the comparison result information
            generating_tables.generate_excel(result_list, head_name, only_file1, only_file2)  # convert to a table
            if result_list:
                generating_tables.generate_excel(result_list, head_name)
                print("有匹配项,已生成表格")
            else:
                print("没有匹配项 or gn文件下无json文件")
@@ -127,13 +187,26 @@ def main_entrance(directory_path, function_names, lib_path, link_path):

            print("gn文件无header函数")


def copy_std_lib(link_include_file):
    std_include = r'sysroot\ndk_musl_include_files'
    if not os.path.exists(std_include):
        shutil.copytree(r'third_party\musl\ndk_musl_include', std_include)
    link_include_file.append(std_include)


def find_include(link_include_path):
    for dir_path, dir_name, file_name in os.walk('sysroot\\$ndk_headers_out_dir'):
        link_include_path.append(dir_path)


def parser(directory_path):  # directory path
    function_name = StringConstant.FUNK_NAME.value  # the function name to match

    libclang_path = StringConstant.LIB_CLANG_PATH.value  # shared library path
    link_include_path = StringConstant.LINK_INCLUDE_PATH.value  # linked header file paths

    link_include_path = []  # holds the linked header file paths
    copy_std_lib(link_include_path)  # copy the ndk headers into sysroot
    find_include(link_include_path)
    link_include(directory_path, function_name, link_include_path)

    main_entrance(directory_path, function_name, libclang_path, link_include_path)  # call the entry function
@@ -4,4 +4,4 @@ import enum


class StringConstant(enum.Enum):
    LIB_CLANG_PATH = r'D:\Environment\LLVM\bin\libclang.dll'
    FUNK_NAME = "ohos_ndk_headers"
    LINK_INCLUDE_PATH = [r'E:\interface_sdk_c\interface_sdk_c']
    REPLACE_WAREHOUSE = '\\interface_sdk_c\\interface_sdk_c'