#!/usr/bin/env python3
################################################################################
# Description #
################################################################################
# calcprogress: Used to calculate the progress of the Pikmin 2 decomp. #
# Prints to stdout for now, but eventually will have some form of storage, #
# i.e. CSV, so that it can be used for a webpage display. #
# #
# Usage: No arguments needed #
################################################################################
###############################################
# #
# Imports #
# #
###############################################
import os
import sys
import struct
import re
import math
import csv
from datetime import datetime
###############################################
# #
# Constants #
# #
###############################################
# MEM1 address-range bounds
MEM1_HI = 0x81200000
MEM1_LO = 0x80004000

# Symbol-map line formats emitted by MetroWerks linkers.
# The Wii variant carries an extra file-offset column that the GC one lacks.
MW_WII_SYMBOL_REGEX = (
    r"^\s*"
    r"(?P<SectOfs>\w{8})\s+"
    r"(?P<Size>\w{6})\s+"
    r"(?P<VirtOfs>\w{8})\s+"
    r"(?P<FileOfs>\w{8})\s+"
    r"(\w{1,2})\s+"
    r"(?P<Symbol>[0-9A-Za-z_<>$@.*]*)\s*"
    r"(?P<Object>\S*)"
)

MW_GC_SYMBOL_REGEX = (
    r"^\s*"
    r"(?P<SectOfs>\w{8})\s+"
    r"(?P<Size>\w{6})\s+"
    r"(?P<VirtOfs>\w{8})\s+"
    r"(\w{1,2})\s+"
    r"(?P<Symbol>[0-9A-Za-z_<>$@.*]*)\s*"
    r"(?P<Object>\S*)"
)

# This project uses the GameCube map format.
REGEX_TO_USE = MW_GC_SYMBOL_REGEX

# Section names classified as code vs. data when walking the map
TEXT_SECTIONS = ["init", "text"]
DATA_SECTIONS = [
    "rodata", "data", "bss", "sdata", "sbss", "sdata2", "sbss2",
    "ctors", "_ctors", "dtors", "ctors$99", "_ctors$99", "ctors$00", "dtors$99",
    "extab_", "extabindex_", "_extab", "_exidx",
]

# DOL header info
TEXT_SECTION_COUNT = 7
DATA_SECTION_COUNT = 11
SECTION_TEXT = 0
SECTION_DATA = 1

# Progress flavor
CODE_FRAC = 10000         # total code "item" amount
DATA_FRAC = 201           # total data "item" amount
CODE_ITEM = "Pokos"       # code flavor item
DATA_ITEM = "treasures"   # data flavor item

CSV_FILE_NAME = 'progress.csv'
CSV_FILE_PATH = f'./tools/{CSV_FILE_NAME}'
2021-10-04 02:12:53 +00:00
###############################################
# #
# Entrypoint #
# #
###############################################
2022-08-17 02:28:07 +00:00
def update_csv(
    code_count,
    decomp_code_size,
    code_completion_percentage,
    data_count,
    decomp_data_size,
    data_completion_percentage,
    sentence,
):
    """Append the current progress snapshot to the progress CSV.

    Creates the file (with a header row) on first run; on later runs a new
    row is appended only when the decompiled code or data byte counts have
    changed since the most recent row.

    Args:
        code_count: progress expressed in code "items" (e.g. Pokos).
        decomp_code_size: decompiled code size, in bytes.
        code_completion_percentage: decompiled code fraction of the DOL.
        data_count: progress expressed in data "items" (e.g. treasures).
        decomp_data_size: decompiled data size, in bytes.
        data_completion_percentage: decompiled data fraction of the DOL.
        sentence: human-readable summary stored alongside the numbers.
    """
    does_file_exist = False
    are_there_changes = True
    try:
        with open(CSV_FILE_PATH, 'r', newline='') as file:
            rows = list(csv.reader(file))
        # An existing-but-empty file still needs a header row, so only a
        # non-empty file counts as "existing" here.
        does_file_exist = bool(rows)
        if rows:
            latest_row = rows[-1]
            latest_code_size = int(latest_row[1])  # code_completion_in_bytes
            latest_data_size = int(latest_row[4])  # data_completion_in_bytes
            are_there_changes = (
                decomp_code_size != latest_code_size
                or decomp_data_size != latest_data_size
            )
        print(f"Successfully read {CSV_FILE_PATH}!")
    except (OSError, ValueError, IndexError):
        # Missing/unreadable file, or a malformed last row (e.g. only the
        # header is present): treat it as "no previous data, write a row".
        print(f'Failed to read {CSV_FILE_PATH}!')
    if not are_there_changes:
        print("No changes detected. Exiting...")
        return

    headers = [
        f"code_count_in_{CODE_ITEM.lower()}",
        "code_completion_in_bytes",
        "code_completion_in_percentage",
        f"data_count_in_{DATA_ITEM.lower()}",
        "data_completion_in_bytes",
        "data_completion_in_percentage",
        "sentence",
        "created_at",
    ]
    values = [
        code_count,
        decomp_code_size,
        code_completion_percentage,
        data_count,
        decomp_data_size,
        data_completion_percentage,
        sentence,
        datetime.now(),
    ]
    try:
        with open(CSV_FILE_PATH, 'a', newline='') as file:
            writer = csv.DictWriter(file, fieldnames=headers)
            # Only emit the header row when starting a fresh (or empty) file.
            if not does_file_exist:
                writer.writeheader()
            writer.writerow(dict(zip(headers, values)))
        print(f"Successfully wrote to {CSV_FILE_PATH}!")
    except OSError:
        print(f"Failed to write to {CSV_FILE_PATH}!")
2022-08-17 02:28:07 +00:00
2021-10-04 02:12:53 +00:00
if __name__ == "__main__":
    # Usage: calcprogress.py <dol file> <symbol map file>

    # HACK: Check asm or src in obj_file.mk
    # to avoid counting .comm/.lcomm as decompiled
    asm_objs = []
    with open('obj_files.mk', 'r') as file:
        for line in file:
            if "asm/" in line:
                asm_objs.append(line.strip().rsplit('/', 1)[-1].rstrip('\\'))

    # Read the DOL header. Starting at 0x48 the layout is:
    # 7 text virtual addresses, 11 data virtual addresses,
    # 7 text sizes, 11 data sizes, then BSS address and size.
    # ("with" ensures the handle is closed even on error.)
    with open(sys.argv[1], "rb") as dol_handle:
        dol_handle.seek(0x48)
        text_starts = [int.from_bytes(dol_handle.read(4), byteorder='big')
                       for _ in range(TEXT_SECTION_COUNT)]
        data_starts = [int.from_bytes(dol_handle.read(4), byteorder='big')
                       for _ in range(DATA_SECTION_COUNT)]
        text_sizes = [int.from_bytes(dol_handle.read(4), byteorder='big')
                      for _ in range(TEXT_SECTION_COUNT)]
        data_sizes = [int.from_bytes(dol_handle.read(4), byteorder='big')
                      for _ in range(DATA_SECTION_COUNT)]
        bss_start = int.from_bytes(dol_handle.read(4), byteorder='big')
        bss_size = int.from_bytes(dol_handle.read(4), byteorder='big')
    bss_end = bss_start + bss_size

    # Total DOL code/data sizes. Data sections living entirely inside BSS
    # are skipped so that BSS (added once, below) is not double-counted.
    dol_code_size = sum(text_sizes)
    dol_data_size = 0
    for start, size in zip(data_starts, data_sizes):
        if start >= bss_start and start + size <= bss_end:
            continue
        dol_data_size += size
    dol_data_size += bss_size

    # Load the symbol map.
    with open(sys.argv[2], "r") as mapfile:
        symbols = mapfile.readlines()

    decomp_code_size = 0
    decomp_data_size = 0
    section_type = None

    # Find the first section declaration. Bounded so that a map with no
    # sections reaches the assert instead of raising IndexError.
    first_section = 0
    while (first_section < len(symbols)
           and not symbols[first_section].startswith(".")
           and "section layout" not in symbols[first_section]):
        first_section += 1
    assert first_section < len(symbols), "Map file contains no sections!!!"

    cur_object = None
    cur_size = 0
    j = 0
    for i in range(first_section, len(symbols)):
        sym_line = symbols[i]
        # New section declaration?
        if sym_line.startswith(".") or "section layout" in sym_line:
            # Grab section name (i.e. ".init section layout" -> "init")
            section_name = re.search(r"\.*(?P<Name>\w+)\s", sym_line).group("Name")
            # Determine type of section
            section_type = SECTION_DATA if section_name in DATA_SECTIONS else SECTION_TEXT
            continue

        # Otherwise it is a symbol line within the current section.
        if "UNUSED" in sym_line:
            continue
        if "entry of" in sym_line:
            # An "entry of" line directly after the last counted symbol
            # means that symbol's size was already accounted for by its
            # parent; back it out once.
            if j == i - 1:
                if section_type == SECTION_TEXT:
                    decomp_code_size -= cur_size
                else:
                    decomp_data_size -= cur_size
                cur_size = 0
            continue

        assert section_type is not None, f"Symbol found outside of a section!!!\n{sym_line}"

        match_obj = re.search(REGEX_TO_USE, sym_line)
        # Lines that don't parse should be symbols still in ASM; discard.
        if match_obj is None:
            continue

        # Skip a symbol when its object file differs from the previous
        # line's, or when the object is still built from asm/ (see HACK).
        last_object = cur_object
        cur_object = match_obj.group("Object").strip()
        if last_object != cur_object or cur_object in asm_objs:
            continue

        # Skip file-wide section symbols ("*fill*", ".text", "extab_", ...).
        # Parenthesization mirrors the original operator precedence.
        symb = match_obj.group("Symbol")
        if (symb.startswith("*fill*")
                or (symb.startswith(".") and symb[1:] in TEXT_SECTIONS)
                or symb[1:] in DATA_SECTIONS):
            continue
        # Sections whose names don't start with "."
        if symb in DATA_SECTIONS:
            continue

        # A real decompiled symbol: accumulate its size.
        cur_size = int(match_obj.group("Size"), 16)
        j = i
        if section_type == SECTION_TEXT:
            decomp_code_size += cur_size
        else:
            decomp_data_size += cur_size

    # Completion ratios and flavor counts
    codeCompletionPcnt = decomp_code_size / dol_code_size  # code completion percent
    dataCompletionPcnt = decomp_data_size / dol_data_size  # data completion percent
    bytesPerCodeItem = dol_code_size / CODE_FRAC           # bytes per code item
    bytesPerDataItem = dol_data_size / DATA_FRAC           # bytes per data item
    codeCount = math.floor(decomp_code_size / bytesPerCodeItem)
    dataCount = math.floor(decomp_data_size / bytesPerDataItem)

    print("Progress:")
    print(f"\tCode sections: {decomp_code_size} / {dol_code_size}\tbytes in src ({codeCompletionPcnt:%})")
    print(f"\tData sections: {decomp_data_size} / {dol_data_size}\tbytes in src ({dataCompletionPcnt:%})")
    sentence = f"\nYou have {codeCount} out of {CODE_FRAC} {CODE_ITEM} and {dataCount} out of {DATA_FRAC} {DATA_ITEM}."
    print(sentence)
    update_csv(
        code_count=codeCount,
        decomp_code_size=decomp_code_size,
        code_completion_percentage=codeCompletionPcnt,
        data_count=dataCount,
        decomp_data_size=decomp_data_size,
        data_completion_percentage=dataCompletionPcnt,
        sentence=sentence,
    )