Add automatic Crowdin synchronization (#248)

This commit is contained in:
Michael Burgardt 2021-11-30 22:14:15 +01:00 committed by GitHub
parent 033e067285
commit 73ed8cff7c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 16912 additions and 321 deletions

33
.github/workflows/crowdin_prep.yml vendored Normal file
View File

@@ -0,0 +1,33 @@
# Prepare source texts & upload them to Crowdin
name: Crowdin Source Texts Upload
# on change to the English texts
on:
push:
branches:
- master
paths:
- 'src/platform/libretro/libretro_core_options.h'
jobs:
upload_source_file:
runs-on: ubuntu-latest
steps:
- name: Setup Java JDK
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Setup Python
uses: actions/setup-python@v2
- name: Checkout
uses: actions/checkout@v2
- name: Upload Source
shell: bash
env:
CROWDIN_API_KEY: ${{ secrets.CROWDIN_API_KEY }}
run: |
python3 intl/upload_workflow.py $CROWDIN_API_KEY "mgba" "src/platform/libretro/libretro_core_options.h"
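
For local testing, the "Upload Source" step above boils down to a single script call; a minimal sketch of the equivalent invocation, assuming CROWDIN_API_KEY is exported in the environment:

import os
import subprocess

# Mirrors the workflow's "Upload Source" run step (paths as in this repository).
subprocess.run(['python3', 'intl/upload_workflow.py',
                os.environ['CROWDIN_API_KEY'], 'mgba',
                'src/platform/libretro/libretro_core_options.h'],
               check=True)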

46
.github/workflows/crowdin_translate.yml vendored Normal file
View File

@@ -0,0 +1,46 @@
# Download translations from Crowdin & recreate libretro_core_options_intl.h
name: Crowdin Translation Integration
on:
schedule:
# please choose a random time & weekday to avoid all repos syncing at the same time
- cron: '25 16 * * 5' # Fridays at 4:25 PM, UTC
jobs:
create_intl_file:
runs-on: ubuntu-latest
steps:
- name: Setup Java JDK
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Setup Python
uses: actions/setup-python@v2
- name: Checkout
uses: actions/checkout@v2
with:
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal access token.
fetch-depth: 0 # otherwise, there would be errors pushing refs to the destination repository.
- name: Create intl file
shell: bash
env:
CROWDIN_API_KEY: ${{ secrets.CROWDIN_API_KEY }}
run: |
python3 intl/download_workflow.py $CROWDIN_API_KEY "mgba" "src/platform/libretro/libretro_core_options_intl.h"
- name: Commit files
run: |
git config --local user.email "github-actions@github.com"
git config --local user.name "github-actions[bot]"
git add intl/*_workflow.py "src/platform/libretro/libretro_core_options_intl.h"
git commit -m "Fetch translations & Recreate libretro_core_options_intl.h"
- name: GitHub Push
uses: ad-m/github-push-action@v0.6.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}
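
Likewise, the scheduled job above amounts to running the download script and committing the regenerated header; a local sketch, again assuming CROWDIN_API_KEY is exported and the working directory is the repository root:

import os
import subprocess

# Mirrors the "Create intl file" and "Commit files" steps of the workflow above.
subprocess.run(['python3', 'intl/download_workflow.py',
                os.environ['CROWDIN_API_KEY'], 'mgba',
                'src/platform/libretro/libretro_core_options_intl.h'],
               check=True)
subprocess.run(['git', 'add', 'intl/*_workflow.py',
                'src/platform/libretro/libretro_core_options_intl.h'], check=True)
subprocess.run(['git', 'commit', '-m',
                'Fetch translations & Recreate libretro_core_options_intl.h'], check=True)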

4
intl/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
__pycache__
crowdin-cli.jar
*.h
*.json

70
intl/activate.py Normal file
View File

@@ -0,0 +1,70 @@
#!/usr/bin/env python3
import os
import glob
import random as r
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
if os.path.basename(DIR_PATH) != "intl":
raise RuntimeError("Script is not in intl folder!")
BASE_PATH = os.path.dirname(DIR_PATH)
WORKFLOW_PATH = os.path.join(BASE_PATH, ".github", "workflows")
PREP_WF = os.path.join(WORKFLOW_PATH, "crowdin_prep.yml")
TRANSLATE_WF = os.path.join(WORKFLOW_PATH, "crowdin_translate.yml")
CORE_NAME = os.path.basename(BASE_PATH)
CORE_OP_FILE = os.path.join(BASE_PATH, "**", "libretro_core_options.h")
core_options_hits = glob.glob(CORE_OP_FILE, recursive=True)
if len(core_options_hits) == 0:
raise RuntimeError("libretro_core_options.h not found!")
elif len(core_options_hits) > 1:
print("More than one libretro_core_options.h file found:\n\n")
for i, file in enumerate(core_options_hits):
print(f"{i} {file}\n")
while True:
user_choice = input("Please choose one ('q' will exit): ")
if user_choice == 'q':
exit(0)
elif user_choice.isdigit():
core_op_file = core_options_hits[int(user_choice)]
break
else:
print("Please make a valid choice!\n\n")
else:
core_op_file = core_options_hits[0]
core_intl_file = os.path.join(os.path.dirname(core_op_file.replace(BASE_PATH, ''))[1:],
'libretro_core_options_intl.h')
core_op_file = os.path.join(os.path.dirname(core_op_file.replace(BASE_PATH, ''))[1:],
'libretro_core_options.h')
minutes = r.randrange(0, 59, 5)
hour = r.randrange(0, 23)
with open(PREP_WF, 'r') as wf_file:
prep_txt = wf_file.read()
prep_txt = prep_txt.replace("<CORE_NAME>", CORE_NAME)
prep_txt = prep_txt.replace("<PATH/TO>/libretro_core_options.h",
core_op_file)
with open(PREP_WF, 'w') as wf_file:
wf_file.write(prep_txt)
with open(TRANSLATE_WF, 'r') as wf_file:
translate_txt = wf_file.read()
translate_txt = translate_txt.replace('<0-59>', f"{minutes}")
translate_txt = translate_txt.replace('<0-23>', f"{hour}")
translate_txt = translate_txt.replace('# Fridays at , UTC',
f"# Fridays at {hour%12}:{minutes} {'AM' if hour < 12 else 'PM'}, UTC")
translate_txt = translate_txt.replace("<CORE_NAME>", CORE_NAME)
translate_txt = translate_txt.replace('<PATH/TO>/libretro_core_options_intl.h',
core_intl_file)
with open(TRANSLATE_WF, 'w') as wf_file:
wf_file.write(translate_txt)
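
The effect of activate.py on the scheduled workflow can be seen on a single line; the values below are illustrative, and the placeholder text is the one targeted by the replace() calls above:

minutes, hour = 25, 16
line = "- cron: '<0-59> <0-23> * * 5' # Fridays at , UTC"
line = line.replace('<0-59>', f"{minutes}").replace('<0-23>', f"{hour}")
line = line.replace('# Fridays at , UTC',
                    f"# Fridays at {hour % 12}:{minutes} {'AM' if hour < 12 else 'PM'}, UTC")
print(line)  # - cron: '25 16 * * 5' # Fridays at 4:25 PM, UTC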

95
intl/core_option_regex.py Normal file
View File

@@ -0,0 +1,95 @@
import re
# 0: full struct; 1: up to & including first []; 2: content between first {}
p_struct = re.compile(r'(struct\s*[a-zA-Z0-9_\s]+\[])\s*'
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+)\s*)*'
r'=\s*' # =
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+)\s*)*'
r'{((?:.|[\r\n])*?)\{\s*NULL,\s*NULL,\s*NULL\s*(?:.|[\r\n])*?},?(?:.|[\r\n])*?};') # captures the full struct, its beginning and its content
# 0: type name[]; 1: type; 2: name
p_type_name = re.compile(r'(retro_core_option_[a-zA-Z0-9_]+)\s*'
r'(option_cats([a-z_]{0,8})|option_defs([a-z_]{0,8}))\s*\[]')
# 0: full option; 1: key; 2: description; 3: additional info; 4: key/value pairs
p_option = re.compile(r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(\".*?\"|' # key start; group 1
r'[a-zA-Z0-9_]+\s*\((?:.|[\r\n])*?\)|'
r'[a-zA-Z0-9_]+\s*\[(?:.|[\r\n])*?]|'
r'[a-zA-Z0-9_]+\s*\".*?\")\s*' # key end
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(\".*?\")\s*' # description; group 2
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'((?:' # group 3
r'(?:NULL|\"(?:.|[\r\n])*?\")\s*' # description in category, info, info in category, category
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r')+)'
r'(?:' # defs only start
r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'((?:' # key/value pairs start; group 4
r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(?:NULL|\".*?\")\s*' # option key
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(?:NULL|\".*?\")\s*' # option value
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'}\s*' # closing braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r')*)' # key/value pairs end
r'}\s*' # closing braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(?:' # defaults start
r'(?:NULL|\".*?\")\s*' # default value
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',?\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r')*' # defaults end
r')?' # defs only end
r'},') # closing braces
# analyse option group 3
p_info = re.compile(r'(NULL|\"(?:.|[\r\n])*?\")\s*' # description in category, info, info in category, category
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',')
p_info_cat = re.compile(r'(NULL|\"(?:.|[\r\n])*?\")')
# analyse option group 4
p_key_value = re.compile(r'{\s*' # opening braces
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(NULL|\".*?\")\s*' # option key; 1
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r',\s*' # comma
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'(NULL|\".*?\")\s*' # option value; 2
r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
r'}')
p_masked = re.compile(r'([A-Z_][A-Z0-9_]+)\s*(\"(?:"\s*"|\\\s*|.)*\")')
p_intl = re.compile(r'(struct retro_core_option_definition \*option_defs_intl\[RETRO_LANGUAGE_LAST]) = {'
r'((?:.|[\r\n])*?)};')
p_set = re.compile(r'static INLINE void libretro_set_core_options\(retro_environment_t environ_cb\)'
r'(?:.|[\r\n])*?};?\s*#ifdef __cplusplus\s*}\s*#endif')
p_yaml = re.compile(r'"project_id": "[0-9]+".*\s*'
r'"api_token": "([a-zA-Z0-9]+)".*\s*'
r'"base_path": "\./intl".*\s*'
r'"base_url": "https://api\.crowdin\.com".*\s*'
r'"preserve_hierarchy": true.*\s*'
r'"files": \[\s*'
r'\{\s*'
r'"source": "/_us/\*\.json",.*\s*'
r'"translation": "/_%two_letters_code%/%original_file_name%",.*\s*'
r'"skip_untranslated_strings": true.*\s*'
r'},\s*'
r']')
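
As a quick illustration of the simplest of these patterns, p_masked pairs a macro name with its string literal in a generated _<lang>.h line; a sketch (run from the intl/ directory; the macro name is invented for the example):

import core_option_regex as cor

m = cor.p_masked.search('CORE_SHOW_FPS_LABEL "Show FPS"')
print(m.group(1))  # CORE_SHOW_FPS_LABEL
print(m.group(2))  # "Show FPS"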

View File

@@ -0,0 +1,620 @@
#!/usr/bin/env python3
"""Core options text extractor
The purpose of this script is to set up & provide functions for automatic generation of 'libretro_core_options_intl.h'
from 'libretro_core_options.h' using translations from Crowdin.
Both v1 and v2 structs are supported. It is, however, recommended to convert v1 files to v2 using the included
'v1_to_v2_converter.py'.
Usage:
python3 path/to/core_option_translation.py "path/to/where/libretro_core_options.h & libretro_core_options_intl.h/are" "core_name"
This script will:
1.) create keywords for & extract the texts from libretro_core_options.h & save them into intl/<core_name>/_us.h
2.) do the same for any translations present in libretro_core_options_intl.h, saving those into the same folder
"""
import core_option_regex as cor
import re
import os
import sys
import json
import urllib.request as req
import shutil
# LANG_CODE_TO_R_LANG = {'_ar': 'RETRO_LANGUAGE_ARABIC',
# '_ast': 'RETRO_LANGUAGE_ASTURIAN',
# '_chs': 'RETRO_LANGUAGE_CHINESE_SIMPLIFIED',
# '_cht': 'RETRO_LANGUAGE_CHINESE_TRADITIONAL',
# '_cs': 'RETRO_LANGUAGE_CZECH',
# '_cy': 'RETRO_LANGUAGE_WELSH',
# '_da': 'RETRO_LANGUAGE_DANISH',
# '_de': 'RETRO_LANGUAGE_GERMAN',
# '_el': 'RETRO_LANGUAGE_GREEK',
# '_eo': 'RETRO_LANGUAGE_ESPERANTO',
# '_es': 'RETRO_LANGUAGE_SPANISH',
# '_fa': 'RETRO_LANGUAGE_PERSIAN',
# '_fi': 'RETRO_LANGUAGE_FINNISH',
# '_fr': 'RETRO_LANGUAGE_FRENCH',
# '_gl': 'RETRO_LANGUAGE_GALICIAN',
# '_he': 'RETRO_LANGUAGE_HEBREW',
# '_hu': 'RETRO_LANGUAGE_HUNGARIAN',
# '_id': 'RETRO_LANGUAGE_INDONESIAN',
# '_it': 'RETRO_LANGUAGE_ITALIAN',
# '_ja': 'RETRO_LANGUAGE_JAPANESE',
# '_ko': 'RETRO_LANGUAGE_KOREAN',
# '_nl': 'RETRO_LANGUAGE_DUTCH',
# '_oc': 'RETRO_LANGUAGE_OCCITAN',
# '_pl': 'RETRO_LANGUAGE_POLISH',
# '_pt_br': 'RETRO_LANGUAGE_PORTUGUESE_BRAZIL',
# '_pt_pt': 'RETRO_LANGUAGE_PORTUGUESE_PORTUGAL',
# '_ru': 'RETRO_LANGUAGE_RUSSIAN',
# '_sk': 'RETRO_LANGUAGE_SLOVAK',
# '_sv': 'RETRO_LANGUAGE_SWEDISH',
# '_tr': 'RETRO_LANGUAGE_TURKISH',
# '_uk': 'RETRO_LANGUAGE_UKRAINIAN',
# '_us': 'RETRO_LANGUAGE_ENGLISH',
# '_vn': 'RETRO_LANGUAGE_VIETNAMESE'}
# these are handled by RetroArch directly - no need to include them in core translations
ON_OFFS = {'"enabled"', '"disabled"', '"true"', '"false"', '"on"', '"off"'}
def remove_special_chars(text: str, char_set=0, allow_non_ascii=False) -> str:
"""Removes special characters from a text.
:param text: String to be cleaned.
:param char_set: 0 -> remove all ASCII special chars except for '_' & 'space' (default)
1 -> remove invalid chars from file names
:param allow_non_ascii: False -> all non-ascii characters will be removed (default)
True -> non-ascii characters will be passed through
:return: Clean text.
"""
command_chars = [chr(unicode) for unicode in tuple(range(0, 32)) + (127,)]
special_chars = ([chr(unicode) for unicode in tuple(range(33, 48)) + tuple(range(58, 65)) + tuple(range(91, 95))
+ (96,) + tuple(range(123, 127))],
('\\', '/', ':', '*', '?', '"', '<', '>', '|', '#', '%',
'&', '{', '}', '$', '!', '¸', "'", '@', '+', '='))
res = text if allow_non_ascii \
else text.encode('ascii', errors='ignore').decode('unicode-escape')
for cm in command_chars:
res = res.replace(cm, '_')
for sp in special_chars[char_set]:
res = res.replace(sp, '_')
while res.startswith('_'):
res = res[1:]
while res.endswith('_'):
res = res[:-1]
return res
def clean_file_name(file_name: str) -> str:
"""Removes characters which might make file_name inappropriate for files on some OS.
:param file_name: File name to be cleaned.
:return: The clean file name.
"""
file_name = remove_special_chars(file_name, 1)
file_name = re.sub(r'__+', '_', file_name.replace(' ', '_'))
return file_name
def get_struct_type_name(decl: str) -> tuple:
""" Returns relevant parts of the struct declaration:
type, name of the struct and the language appendix, if present.
:param decl: The struct declaration matched by cor.p_type_name.
:return: Tuple, e.g.: ('retro_core_option_definition', 'option_defs_us', '_us')
"""
struct_match = cor.p_type_name.search(decl)
if struct_match:
if struct_match.group(3):
struct_type_name = struct_match.group(1, 2, 3)
return struct_type_name
elif struct_match.group(4):
struct_type_name = struct_match.group(1, 2, 4)
return struct_type_name
else:
struct_type_name = struct_match.group(1, 2)
return struct_type_name
else:
raise ValueError(f'No or incomplete struct declaration: {decl}!\n'
'Please make sure all structs are complete, including the type and name declaration.')
def is_viable_non_dupe(text: str, comparison) -> bool:
"""text must be longer than 2 ('""'), not 'NULL' and not in comparison.
:param text: String to be tested.
:param comparison: Dictionary or set to search for text in.
:return: bool
"""
return 2 < len(text) and text != 'NULL' and text not in comparison
def is_viable_value(text: str) -> bool:
"""text must be longer than 2 ('""'), not 'NULL' and text.lower() not in
{'"enabled"', '"disabled"', '"true"', '"false"', '"on"', '"off"'}.
:param text: String to be tested.
:return: bool
"""
return 2 < len(text) and text != 'NULL' and text.lower() not in ON_OFFS
def create_non_dupe(base_name: str, opt_num: int, comparison) -> str:
"""Makes sure base_name is not in comparison, and if it is it's renamed.
:param base_name: Name to check/make unique.
:param opt_num: Number of the option base_name belongs to, used in making it unique.
:param comparison: Dictionary or set to search for base_name in.
:return: Unique name.
"""
h = base_name
if h in comparison:
n = 0
h = h + '_O' + str(opt_num)
h_end = len(h)
while h in comparison:
h = h[:h_end] + '_' + str(n)
n += 1
return h
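# Illustrative examples (editor's sketch, names invented):
#   create_non_dupe('FOO_LABEL', 3, {'FOO_LABEL'})                 -> 'FOO_LABEL_O3'
#   create_non_dupe('FOO_LABEL', 3, {'FOO_LABEL', 'FOO_LABEL_O3'}) -> 'FOO_LABEL_O3_0'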
def get_texts(text: str) -> dict:
"""Extracts the strings, which are to be translated/are the translations,
from text and creates macro names for them.
:param text: The string to be parsed.
:return: Dictionary of the form { '_<lang>': { 'macro': 'string', ... }, ... }.
"""
# all structs: group(0) full struct, group(1) beginning, group(2) content
structs = cor.p_struct.finditer(text)
hash_n_string = {}
just_string = {}
for struct in structs:
struct_declaration = struct.group(1)
struct_type_name = get_struct_type_name(struct_declaration)
if 3 > len(struct_type_name):
lang = '_us'
else:
lang = struct_type_name[2]
if lang not in just_string:
hash_n_string[lang] = {}
just_string[lang] = set()
is_v2 = False
pre_name = ''
p = cor.p_info
if 'retro_core_option_v2_definition' == struct_type_name[0]:
is_v2 = True
elif 'retro_core_option_v2_category' == struct_type_name[0]:
pre_name = 'CATEGORY_'
p = cor.p_info_cat
struct_content = struct.group(2)
# 0: full option; 1: key; 2: description; 3: additional info; 4: key/value pairs
struct_options = cor.p_option.finditer(struct_content)
for opt, option in enumerate(struct_options):
# group 1: key
if option.group(1):
opt_name = pre_name + option.group(1)
# no special chars allowed in key
opt_name = remove_special_chars(opt_name).upper().replace(' ', '_')
else:
raise ValueError(f'No option name (key) found in struct {struct_type_name[1]} option {opt}!')
# group 2: description0
if option.group(2):
desc0 = option.group(2)
if is_viable_non_dupe(desc0, just_string[lang]):
just_string[lang].add(desc0)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_LABEL'), opt, hash_n_string[lang])
hash_n_string[lang][m_h] = desc0
else:
raise ValueError(f'No label found in struct {struct_type_name[1]} option {option.group(1)}!')
# group 3: desc1, info0, info1, category
if option.group(3):
infos = option.group(3)
option_info = p.finditer(infos)
if is_v2:
desc1 = next(option_info).group(1)
if is_viable_non_dupe(desc1, just_string[lang]):
just_string[lang].add(desc1)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_LABEL_CAT'), opt, hash_n_string[lang])
hash_n_string[lang][m_h] = desc1
last = None
m_h = None
for j, info in enumerate(option_info):
last = info.group(1)
if is_viable_non_dupe(last, just_string[lang]):
just_string[lang].add(last)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_INFO_{j}'), opt,
hash_n_string[lang])
hash_n_string[lang][m_h] = last
if last in just_string[lang]: # category key should not be translated
hash_n_string[lang].pop(m_h)
just_string[lang].remove(last)
else:
for j, info in enumerate(option_info):
gr1 = info.group(1)
if is_viable_non_dupe(gr1, just_string[lang]):
just_string[lang].add(gr1)
m_h = create_non_dupe(re.sub(r'__+', '_', f'{opt_name}_INFO_{j}'), opt,
hash_n_string[lang])
hash_n_string[lang][m_h] = gr1
else:
raise ValueError(f'Too few arguments in struct {struct_type_name[1]} option {option.group(1)}!')
# group 4:
if option.group(4):
for j, kv_set in enumerate(cor.p_key_value.finditer(option.group(4))):
set_key, set_value = kv_set.group(1, 2)
if not is_viable_value(set_value):
if not is_viable_value(set_key):
continue
set_value = set_key
# re.fullmatch(r'(?:[+-][0-9]+)+', value[1:-1])
if set_value not in just_string[lang] and not re.sub(r'[+-]', '', set_value[1:-1]).isdigit():
clean_key = set_key[1:-1]
clean_key = remove_special_chars(clean_key).upper().replace(' ', '_')
m_h = create_non_dupe(re.sub(r'__+', '_', f"OPTION_VAL_{clean_key}"), opt, hash_n_string[lang])
hash_n_string[lang][m_h] = set_value
just_string[lang].add(set_value)
return hash_n_string
def create_msg_hash(intl_dir_path: str, core_name: str, keyword_string_dict: dict) -> dict:
"""Creates '<core_name>.h' files in 'intl/_<lang>/' containing the macro name & string combinations.
:param intl_dir_path: Path to the intl directory.
:param core_name: Name of the core, used for the files' paths.
:param keyword_string_dict: Dictionary of the form { '_<lang>': { 'macro': 'string', ... }, ... }.
:return: Dictionary of the form { '_<lang>': 'path/to/file (./intl/<core_name>/_<lang>.h)', ... }.
"""
files = {}
for localisation in keyword_string_dict:
path = os.path.join(intl_dir_path, core_name) # intl/<core_name>/
files[localisation] = os.path.join(path, localisation + '.h') # intl/<core_name>/_<lang>.h
if not os.path.exists(path):
os.makedirs(path)
with open(files[localisation], 'w', encoding='utf-8') as crowdin_file:
out_text = ''
for keyword in keyword_string_dict[localisation]:
out_text = f'{out_text}{keyword} {keyword_string_dict[localisation][keyword]}\n'
crowdin_file.write(out_text)
return files
def h2json(file_paths: dict) -> dict:
"""Converts .h files pointed to by file_paths into .jsons.
:param file_paths: Dictionary of the form { '_<lang>': 'path/to/file (./intl/<core_name>/_<lang>.h)', ... }.
:return: Dictionary of the form { '_<lang>': 'path/to/file (./intl/<core_name>/_<lang>.json)', ... }.
"""
jsons = {}
for file_lang in file_paths:
if not os.path.isfile(file_paths[file_lang]):
continue
jsons[file_lang] = file_paths[file_lang][:-2] + '.json'
p = cor.p_masked
with open(file_paths[file_lang], 'r+', encoding='utf-8') as h_file:
text = h_file.read()
result = p.finditer(text)
messages = {}
for msg in result:
key, val = msg.group(1, 2)
if key not in messages:
if key and val:
# unescape & remove "\n"
messages[key] = re.sub(r'"\s*(?:(?:/\*(?:.|[\r\n])*?\*/|//.*[\r\n]+)\s*)*"',
'\\\n', val[1:-1].replace('\\\"', '"'))
else:
print(f"DUPLICATE KEY in {file_paths[file_lang]}: {key}")
with open(jsons[file_lang], 'w', encoding='utf-8') as json_file:
json.dump(messages, json_file, indent=2)
return jsons
def json2h(intl_dir_path: str, file_list) -> None:
"""Converts .json file in json_file_path into an .h ready to be included in C code.
:param intl_dir_path: Path to the intl/<core_name> directory.
:param file_list: Iterator of os.DirEntry objects. Contains localisation files to convert.
:return: None
"""
p = cor.p_masked
def update(s_messages, s_template, s_source_messages, file_name):
translation = ''
template_messages = p.finditer(s_template)
for tp_msg in template_messages:
old_key = tp_msg.group(1)
if old_key in s_messages and s_messages[old_key] != s_source_messages[old_key]:
tl_msg_val = s_messages[old_key]
tl_msg_val = tl_msg_val.replace('"', '\\\"').replace('\n', '') # escape
translation = ''.join((translation, '#define ', old_key, file_name.upper(), f' "{tl_msg_val}"\n'))
else: # Remove English duplicates and non-translatable strings
translation = ''.join((translation, '#define ', old_key, file_name.upper(), ' NULL\n'))
return translation
us_h = os.path.join(intl_dir_path, '_us.h')
us_json = os.path.join(intl_dir_path, '_us.json')
with open(us_h, 'r', encoding='utf-8') as template_file:
template = template_file.read()
with open(us_json, 'r+', encoding='utf-8') as source_json_file:
source_messages = json.load(source_json_file)
for file in file_list:
if file.name.lower().startswith('_us') \
or file.name.lower().endswith('.h') \
or file.is_dir():
continue
with open(file.path, 'r+', encoding='utf-8') as json_file:
messages = json.load(json_file)
new_translation = update(messages, template, source_messages, os.path.splitext(file.name)[0])
with open(os.path.splitext(file.path)[0] + '.h', 'w', encoding='utf-8') as h_file:
h_file.seek(0)
h_file.write(new_translation)
h_file.truncate()
return
def get_crowdin_client(dir_path: str) -> str:
"""Makes sure the Crowdin CLI client is present. If it isn't, it is fetched & extracted.
:return: The path to 'crowdin-cli.jar'.
"""
jar_name = 'crowdin-cli.jar'
jar_path = os.path.join(dir_path, jar_name)
if not os.path.isfile(jar_path):
print('Downloading crowdin-cli.jar')
crowdin_cli_file = os.path.join(dir_path, 'crowdin-cli.zip')
crowdin_cli_url = 'https://downloads.crowdin.com/cli/v3/crowdin-cli.zip'
req.urlretrieve(crowdin_cli_url, crowdin_cli_file)
import zipfile
with zipfile.ZipFile(crowdin_cli_file, 'r') as zip_ref:
jar_dir = zip_ref.namelist()[0]
for file in zip_ref.namelist():
if file.endswith(jar_name):
jar_file = file
break
zip_ref.extract(jar_file)
os.rename(jar_file, jar_path)
os.remove(crowdin_cli_file)
shutil.rmtree(jar_dir)
return jar_path
def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str, file_path: str) -> None:
"""Creates 'libretro_core_options_intl.h' from Crowdin translations.
:param localisation_file_path: Path to 'libretro_core_options_intl.h'
:param intl_dir_path: Path to the intl/<core_name> directory.
:param text: Content of the 'libretro_core_options.h' being translated.
:param file_path: Path to the '_us.h' file, containing the original English texts.
:return: None
"""
msg_dict = {}
lang_up = ''
def replace_pair(pair_match):
"""Replaces a key-value-pair of an option with the macros corresponding to the language.
:param pair_match: The re match object representing the key-value-pair block.
:return: Replacement string.
"""
offset = pair_match.start(0)
if pair_match.group(1): # key
if pair_match.group(2) in msg_dict: # value
val = msg_dict[pair_match.group(2)] + lang_up
elif pair_match.group(1) in msg_dict: # use key if value not viable (e.g. NULL)
val = msg_dict[pair_match.group(1)] + lang_up
else:
return pair_match.group(0)
else:
return pair_match.group(0)
res = pair_match.group(0)[:pair_match.start(2) - offset] + val \
+ pair_match.group(0)[pair_match.end(2) - offset:]
return res
def replace_info(info_match):
"""Replaces the 'additional strings' of an option with the macros corresponding to the language.
:param info_match: The re match object representing the 'additional strings' block.
:return: Replacement string.
"""
offset = info_match.start(0)
if info_match.group(1) in msg_dict:
res = info_match.group(0)[:info_match.start(1) - offset] + \
msg_dict[info_match.group(1)] + lang_up + \
info_match.group(0)[info_match.end(1) - offset:]
return res
else:
return info_match.group(0)
def replace_option(option_match):
"""Replaces strings within an option
'{ "opt_key", "label", "additional strings", ..., { {"key", "value"}, ... }, ... }'
within a struct with the macros corresponding to the language:
'{ "opt_key", MACRO_LABEL, MACRO_STRINGS, ..., { {"key", MACRO_VALUE}, ... }, ... }'
:param option_match: The re match object representing the option.
:return: Replacement string.
"""
# label
offset = option_match.start(0)
if option_match.group(2):
res = option_match.group(0)[:option_match.start(2) - offset] + msg_dict[option_match.group(2)] + lang_up
else:
return option_match.group(0)
# additional block
if option_match.group(3):
res = res + option_match.group(0)[option_match.end(2) - offset:option_match.start(3) - offset]
new_info = p.sub(replace_info, option_match.group(3))
res = res + new_info
else:
return res + option_match.group(0)[option_match.end(2) - offset:]
# key-value-pairs
if option_match.group(4):
res = res + option_match.group(0)[option_match.end(3) - offset:option_match.start(4) - offset]
new_pairs = cor.p_key_value.sub(replace_pair, option_match.group(4))
res = res + new_pairs + option_match.group(0)[option_match.end(4) - offset:]
else:
res = res + option_match.group(0)[option_match.end(3) - offset:]
return res
# ------------------------------------------------------------------------------------
with open(file_path, 'r+', encoding='utf-8') as template: # intl/<core_name>/_us.h
masked_msgs = cor.p_masked.finditer(template.read())
for msg in masked_msgs:
msg_dict[msg.group(2)] = msg.group(1)
# top of the file - in case there is no file to copy it from
out_txt = "#ifndef LIBRETRO_CORE_OPTIONS_INTL_H__\n" \
"#define LIBRETRO_CORE_OPTIONS_INTL_H__\n\n" \
"#if defined(_MSC_VER) && (_MSC_VER >= 1500 && _MSC_VER < 1900)\n" \
"/* https://support.microsoft.com/en-us/kb/980263 */\n" \
'#pragma execution_character_set("utf-8")\n' \
"#pragma warning(disable:4566)\n" \
"#endif\n\n" \
"#include <libretro.h>\n\n" \
'#ifdef __cplusplus\n' \
'extern "C" {\n' \
'#endif\n'
if os.path.isfile(localisation_file_path):
# copy top of the file for re-use
with open(localisation_file_path, 'r', encoding='utf-8') as intl: # libretro_core_options_intl.h
in_text = intl.read()
intl_start = re.search(re.escape('/*\n'
' ********************************\n'
' * Core Option Definitions\n'
' ********************************\n'
'*/\n'), in_text)
if intl_start:
out_txt = in_text[:intl_start.end(0)]
else:
intl_start = re.search(re.escape('#ifdef __cplusplus\n'
'extern "C" {\n'
'#endif\n'), in_text)
if intl_start:
out_txt = in_text[:intl_start.end(0)]
# only write to file, if there is anything worthwhile to write!
overwrite = False
# iterate through localisation files
files = {}
for file in os.scandir(intl_dir_path):
files[file.name] = {'is_file': file.is_file(), 'path': file.path}
for file in sorted(files): # intl/<core_name>/_*
if files[file]['is_file'] \
and file.startswith('_') \
and file.endswith('.h') \
and not file.startswith('_us'):
translation_path = files[file]['path'] # <core_name>_<lang>.h
# all structs: group(0) full struct, group(1) beginning, group(2) content
struct_groups = cor.p_struct.finditer(text)
lang_low = os.path.splitext(file)[0].lower()
lang_up = lang_low.upper()
out_txt = out_txt + f'/* RETRO_LANGUAGE{lang_up} */\n\n' # /* RETRO_LANGUAGE_NM */
# copy adjusted translations (macros)
with open(translation_path, 'r+', encoding='utf-8') as f_in: # <core name>.h
out_txt = out_txt + f_in.read() + '\n'
# replace English texts with macros
for construct in struct_groups:
declaration = construct.group(1)
struct_type_name = get_struct_type_name(declaration)
if 3 > len(struct_type_name): # no language specifier
new_decl = re.sub(re.escape(struct_type_name[1]), struct_type_name[1] + lang_low, declaration)
else:
new_decl = re.sub(re.escape(struct_type_name[2]), lang_low, declaration)
if '_us' != struct_type_name[2]:
continue
p = cor.p_info
if 'retro_core_option_v2_category' == struct_type_name[0]:
p = cor.p_info_cat
offset_construct = construct.start(0)
start = construct.end(1) - offset_construct
end = construct.start(2) - offset_construct
out_txt = out_txt + new_decl + construct.group(0)[start:end]
content = construct.group(2)
new_content = cor.p_option.sub(replace_option, content)
start = construct.end(2) - offset_construct
out_txt = out_txt + new_content + construct.group(0)[start:] + '\n'
# for v2
if 'retro_core_option_v2_definition' == struct_type_name[0]:
out_txt = out_txt + f'struct retro_core_options_v2 options{lang_low}' \
' = {\n' \
f' option_cats{lang_low},\n' \
f' option_defs{lang_low}\n' \
'};\n\n'
# if it got this far, we've got something to write
overwrite = True
# only write to file, if there is anything worthwhile to write!
if overwrite:
with open(localisation_file_path, 'w', encoding='utf-8') as intl:
intl.write(out_txt + '\n#ifdef __cplusplus\n'
'}\n#endif\n'
'\n#endif')
return
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
try:
if os.path.isfile(sys.argv[1]):
_temp = os.path.dirname(sys.argv[1])
else:
_temp = sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
TARGET_DIR_PATH = _temp
except IndexError:
TARGET_DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + TARGET_DIR_PATH)
CORE_NAME = clean_file_name(sys.argv[2])
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
H_FILE_PATH = os.path.join(TARGET_DIR_PATH, 'libretro_core_options.h')
INTL_FILE_PATH = os.path.join(TARGET_DIR_PATH, 'libretro_core_options_intl.h')
print('Getting texts from libretro_core_options.h')
with open(H_FILE_PATH, 'r+', encoding='utf-8') as _h_file:
_main_text = _h_file.read()
_hash_n_str = get_texts(_main_text)
_files = create_msg_hash(DIR_PATH, CORE_NAME, _hash_n_str)
_source_jsons = h2json(_files)
print('Getting texts from libretro_core_options_intl.h')
if os.path.isfile(INTL_FILE_PATH):
with open(INTL_FILE_PATH, 'r+', encoding='utf-8') as _intl_file:
_intl_text = _intl_file.read()
_hash_n_str_intl = get_texts(_intl_text)
_intl_files = create_msg_hash(DIR_PATH, CORE_NAME, _hash_n_str_intl)
_intl_jsons = h2json(_intl_files)
print('\nAll done!')
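
In short, this script turns each option string into a macro/text pair and mirrors those pairs as JSON for Crowdin. A minimal sketch of the extraction pipeline run by the __main__ block above (core name and paths are illustrative):

import core_option_translation as t

with open('libretro_core_options.h', 'r', encoding='utf-8') as h_file:
    texts = t.get_texts(h_file.read())               # {'_us': {'MACRO': '"text"', ...}, ...}
h_files = t.create_msg_hash('intl', 'mycore', texts)  # writes intl/mycore/_us.h
json_files = t.h2json(h_files)                       # writes intl/mycore/_us.json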

13
intl/crowdin.yaml Normal file
View File

@@ -0,0 +1,13 @@
"project_id": "380544"
"api_token": "_secret_"
"base_url": "https://api.crowdin.com"
"preserve_hierarchy": true
"files":
[
{
"source": "/intl/_core_name_/_us.json",
"dest": "/_core_name_/_core_name_.json",
"translation": "/intl/_core_name_/_%two_letters_code%.json",
},
]
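
The "_secret_" and "_core_name_" placeholders above are filled in at run time by the sync scripts below and reset afterwards; a sketch of that substitution, with an obviously fictitious token:

import re

with open('intl/crowdin.yaml', 'r') as f:
    config = f.read()
config = re.sub(r'"api_token": "_secret_"', '"api_token": "abc123"', config, 1)
config = re.sub(r'/_core_name_', '/mgba', config)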

30
intl/crowdin_prep.py Normal file
View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
import core_option_translation as t
if __name__ == '__main__':
try:
if t.os.path.isfile(t.sys.argv[1]):
_temp = t.os.path.dirname(t.sys.argv[1])
else:
_temp = t.sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
TARGET_DIR_PATH = _temp
except IndexError:
TARGET_DIR_PATH = t.os.path.dirname(t.os.path.dirname(t.os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + TARGET_DIR_PATH)
CORE_NAME = t.clean_file_name(t.sys.argv[2])
DIR_PATH = t.os.path.dirname(t.os.path.realpath(__file__))
H_FILE_PATH = t.os.path.join(TARGET_DIR_PATH, 'libretro_core_options.h')
print('Getting texts from libretro_core_options.h')
with open(H_FILE_PATH, 'r+', encoding='utf-8') as _h_file:
_main_text = _h_file.read()
_hash_n_str = t.get_texts(_main_text)
_files = t.create_msg_hash(DIR_PATH, CORE_NAME, _hash_n_str)
_source_jsons = t.h2json(_files)
print('\nAll done!')
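
crowdin_prep.py is the source-extraction half of the pipeline; a sketch of invoking it directly, with the same arguments the workflow wrapper passes for this core:

import subprocess

subprocess.run(['python3', 'intl/crowdin_prep.py',
                'src/platform/libretro/libretro_core_options.h', 'mgba'],
               check=True)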

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env python3
import re
import os
import shutil
import subprocess
import sys
import urllib.request
import zipfile
import core_option_translation as t
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
# Check Crowdin API Token and core name
try:
API_KEY = sys.argv[1]
CORE_NAME = t.clean_file_name(sys.argv[2])
except IndexError as e:
print('Please provide Crowdin API Token and core name!')
raise e
DIR_PATH = t.os.path.dirname(t.os.path.realpath(__file__))
YAML_PATH = t.os.path.join(DIR_PATH, 'crowdin.yaml')
# Apply Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": "_secret_"',
f'"api_token": "{API_KEY}"',
crowdin_config, 1)
crowdin_config = re.sub(r'/_core_name_',
f'/{CORE_NAME}'
, crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
try:
# Download Crowdin CLI
jar_name = 'crowdin-cli.jar'
jar_path = t.os.path.join(DIR_PATH, jar_name)
crowdin_cli_file = 'crowdin-cli.zip'
crowdin_cli_url = 'https://downloads.crowdin.com/cli/v3/' + crowdin_cli_file
crowdin_cli_path = t.os.path.join(DIR_PATH, crowdin_cli_file)
if not os.path.isfile(t.os.path.join(DIR_PATH, jar_name)):
print('download crowdin-cli.jar')
urllib.request.urlretrieve(crowdin_cli_url, crowdin_cli_path)
with zipfile.ZipFile(crowdin_cli_path, 'r') as zip_ref:
jar_dir = t.os.path.join(DIR_PATH, zip_ref.namelist()[0])
for file in zip_ref.namelist():
if file.endswith(jar_name):
jar_file = file
break
zip_ref.extract(jar_file, path=DIR_PATH)
os.rename(t.os.path.join(DIR_PATH, jar_file), jar_path)
os.remove(crowdin_cli_path)
shutil.rmtree(jar_dir)
print('upload source *.json')
subprocess.run(['java', '-jar', jar_path, 'upload', 'sources', '--config', YAML_PATH])
# Reset Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": ".*?"',
'"api_token": "_secret_"',
crowdin_config, 1)
# TODO this is NOT safe!
crowdin_config = re.sub(re.escape(f'/{CORE_NAME}'),
'/_core_name_',
crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
except Exception as e:
# Try really hard to reset Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": ".*?"',
'"api_token": "_secret_"',
crowdin_config, 1)
# TODO this is NOT safe!
crowdin_config = re.sub(re.escape(f'/{CORE_NAME}'),
'/_core_name_',
crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
raise e
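
The API-token reset is duplicated in the success path and the except handler above; as an aside, the same guarantee could be sketched once with try/finally (reusing the script's own jar_path, YAML_PATH and CORE_NAME, behaviour otherwise unchanged):

# Editor's sketch only, not part of the script above.
try:
    subprocess.run(['java', '-jar', jar_path, 'upload', 'sources', '--config', YAML_PATH])
finally:
    with open(YAML_PATH, 'r') as f:
        cfg = f.read()
    cfg = re.sub(r'"api_token": ".*?"', '"api_token": "_secret_"', cfg, 1)
    cfg = re.sub(re.escape(f'/{CORE_NAME}'), '/_core_name_', cfg)
    with open(YAML_PATH, 'w') as f:
        f.write(cfg)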

39
intl/crowdin_translate.py Normal file
View File

@@ -0,0 +1,39 @@
#!/usr/bin/env python3
import core_option_translation as t
if __name__ == '__main__':
try:
if t.os.path.isfile(t.sys.argv[1]):
_temp = t.os.path.dirname(t.sys.argv[1])
else:
_temp = t.sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
TARGET_DIR_PATH = _temp
except IndexError:
TARGET_DIR_PATH = t.os.path.dirname(t.os.path.dirname(t.os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + TARGET_DIR_PATH)
CORE_NAME = t.clean_file_name(t.sys.argv[2])
DIR_PATH = t.os.path.dirname(t.os.path.realpath(__file__))
LOCALISATIONS_PATH = t.os.path.join(DIR_PATH, CORE_NAME)
US_FILE_PATH = t.os.path.join(LOCALISATIONS_PATH, '_us.h')
H_FILE_PATH = t.os.path.join(TARGET_DIR_PATH, 'libretro_core_options.h')
INTL_FILE_PATH = t.os.path.join(TARGET_DIR_PATH, 'libretro_core_options_intl.h')
print('Getting texts from libretro_core_options.h')
with open(H_FILE_PATH, 'r+', encoding='utf-8') as _h_file:
_main_text = _h_file.read()
_hash_n_str = t.get_texts(_main_text)
_files = t.create_msg_hash(DIR_PATH, CORE_NAME, _hash_n_str)
_source_jsons = t.h2json(_files)
print('Converting translations *.json to *.h:')
localisation_files = t.os.scandir(LOCALISATIONS_PATH)
t.json2h(LOCALISATIONS_PATH, localisation_files)
print('Constructing libretro_core_options_intl.h')
t.create_intl_file(INTL_FILE_PATH, LOCALISATIONS_PATH, _main_text, _files["_us"])
print('\nAll done!')
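
Once the translated JSON files are in place, the heavy lifting is json2h() followed by create_intl_file(); a sketch with illustrative paths, assuming the source macros and downloaded translations already sit in intl/mycore/:

import os
import core_option_translation as t

loc_dir = 'intl/mycore'
t.json2h(loc_dir, os.scandir(loc_dir))               # _<lang>.json  ->  _<lang>.h
with open('libretro_core_options.h', 'r', encoding='utf-8') as f:
    source_text = f.read()
t.create_intl_file('libretro_core_options_intl.h', loc_dir, source_text,
                   os.path.join(loc_dir, '_us.h'))   # assemble the intl header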

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env python3
import re
import os
import shutil
import subprocess
import sys
import urllib.request
import zipfile
import core_option_translation as t
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
# Check Crowdin API Token and core name
try:
API_KEY = sys.argv[1]
CORE_NAME = t.clean_file_name(sys.argv[2])
except IndexError as e:
print('Please provide Crowdin API Token and core name!')
raise e
DIR_PATH = t.os.path.dirname(t.os.path.realpath(__file__))
YAML_PATH = t.os.path.join(DIR_PATH, 'crowdin.yaml')
# Apply Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": "_secret_"',
f'"api_token": "{API_KEY}"',
crowdin_config, 1)
crowdin_config = re.sub(r'/_core_name_',
f'/{CORE_NAME}'
, crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
try:
# Download Crowdin CLI
jar_name = 'crowdin-cli.jar'
jar_path = t.os.path.join(DIR_PATH, jar_name)
crowdin_cli_file = 'crowdin-cli.zip'
crowdin_cli_url = 'https://downloads.crowdin.com/cli/v3/' + crowdin_cli_file
crowdin_cli_path = t.os.path.join(DIR_PATH, crowdin_cli_file)
if not os.path.isfile(t.os.path.join(DIR_PATH, jar_name)):
print('download crowdin-cli.jar')
urllib.request.urlretrieve(crowdin_cli_url, crowdin_cli_path)
with zipfile.ZipFile(crowdin_cli_path, 'r') as zip_ref:
jar_dir = t.os.path.join(DIR_PATH, zip_ref.namelist()[0])
for file in zip_ref.namelist():
if file.endswith(jar_name):
jar_file = file
break
zip_ref.extract(jar_file, path=DIR_PATH)
os.rename(t.os.path.join(DIR_PATH, jar_file), jar_path)
os.remove(crowdin_cli_path)
shutil.rmtree(jar_dir)
print('download translation *.json')
subprocess.run(['java', '-jar', jar_path, 'download', '--config', YAML_PATH])
# Reset Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": ".*?"',
'"api_token": "_secret_"',
crowdin_config, 1)
# TODO this is NOT safe!
crowdin_config = re.sub(re.escape(f'/{CORE_NAME}'),
'/_core_name_',
crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
except Exception as e:
# Try really hard to reset Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": ".*?"',
'"api_token": "_secret_"',
crowdin_config, 1)
# TODO this is NOT safe!
crowdin_config = re.sub(re.escape(f'/{CORE_NAME}'),
'/_core_name_',
crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
raise e

16
intl/download_workflow.py Normal file
View File

@@ -0,0 +1,16 @@
#!/usr/bin/env python3
import sys
import subprocess
try:
api_key = sys.argv[1]
core_name = sys.argv[2]
dir_path = sys.argv[3]
except IndexError as e:
print('Please provide path to libretro_core_options.h, Crowdin API Token and core name!')
raise e
subprocess.run(['python3', 'intl/crowdin_prep.py', dir_path, core_name])
subprocess.run(['python3', 'intl/crowdin_translation_download.py', api_key, core_name])
subprocess.run(['python3', 'intl/crowdin_translate.py', dir_path, core_name])

125
intl/initial_sync.py Normal file
View File

@@ -0,0 +1,125 @@
#!/usr/bin/env python3
import re
import os
import shutil
import subprocess
import sys
import time
import urllib.request
import zipfile
import core_option_translation as t
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
# Check Crowdin API Token and core name
try:
API_KEY = sys.argv[1]
CORE_NAME = t.clean_file_name(sys.argv[2])
except IndexError as e:
print('Please provide Crowdin API Token and core name!')
raise e
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
YAML_PATH = os.path.join(DIR_PATH, 'crowdin.yaml')
# Apply Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": "_secret_"',
f'"api_token": "{API_KEY}"',
crowdin_config, 1)
crowdin_config = re.sub(r'/_core_name_',
f'/{CORE_NAME}'
, crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
try:
# Download Crowdin CLI
jar_name = 'crowdin-cli.jar'
jar_path = os.path.join(DIR_PATH, jar_name)
crowdin_cli_file = 'crowdin-cli.zip'
crowdin_cli_url = 'https://downloads.crowdin.com/cli/v3/' + crowdin_cli_file
crowdin_cli_path = os.path.join(DIR_PATH, crowdin_cli_file)
if not os.path.isfile(os.path.join(DIR_PATH, jar_name)):
print('download crowdin-cli.jar')
urllib.request.urlretrieve(crowdin_cli_url, crowdin_cli_path)
with zipfile.ZipFile(crowdin_cli_path, 'r') as zip_ref:
jar_dir = os.path.join(DIR_PATH, zip_ref.namelist()[0])
for file in zip_ref.namelist():
if file.endswith(jar_name):
jar_file = file
break
zip_ref.extract(jar_file, path=DIR_PATH)
os.rename(os.path.join(DIR_PATH, jar_file), jar_path)
os.remove(crowdin_cli_path)
shutil.rmtree(jar_dir)
print('upload source & translations *.json')
subprocess.run(['java', '-jar', jar_path, 'upload', 'sources', '--config', YAML_PATH])
subprocess.run(['java', '-jar', jar_path, 'upload', 'translations', '--config', YAML_PATH])
print('wait for crowdin server to process data')
time.sleep(10)
print('download translation *.json')
subprocess.run(['java', '-jar', jar_path, 'download', '--config', YAML_PATH])
# Reset Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": ".*?"', '"api_token": "_secret_"', crowdin_config, 1)
# TODO this is NOT safe!
crowdin_config = re.sub(re.escape(f'/{CORE_NAME}'),
'/_core_name_',
crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
with open('intl/upload_workflow.py', 'r') as workflow:
workflow_config = workflow.read()
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/core_option_translation.py', dir_path, core_name])",
"subprocess.run(['python3', 'intl/crowdin_prep.py', dir_path, core_name])"
)
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/initial_sync.py', api_key, core_name])",
"subprocess.run(['python3', 'intl/crowdin_source_upload.py', api_key, core_name])"
)
with open('intl/upload_workflow.py', 'w') as workflow:
workflow.write(workflow_config)
with open('intl/download_workflow.py', 'r') as workflow:
workflow_config = workflow.read()
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/core_option_translation.py', dir_path, core_name])",
"subprocess.run(['python3', 'intl/crowdin_prep.py', dir_path, core_name])"
)
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/initial_sync.py', api_key, core_name])",
"subprocess.run(['python3', 'intl/crowdin_translation_download.py', api_key, core_name])"
)
with open('intl/download_workflow.py', 'w') as workflow:
workflow.write(workflow_config)
except Exception as e:
# Try really hard to reset Crowdin API Key
with open(YAML_PATH, 'r') as crowdin_config_file:
crowdin_config = crowdin_config_file.read()
crowdin_config = re.sub(r'"api_token": ".*?"',
'"api_token": "_secret_"',
crowdin_config, 1)
# TODO this is NOT safe!
crowdin_config = re.sub(re.escape(f'/{CORE_NAME}'),
'/_core_name_',
crowdin_config)
with open(YAML_PATH, 'w') as crowdin_config_file:
crowdin_config_file.write(crowdin_config)
raise e
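
initial_sync.py is meant to be run once by hand to seed the Crowdin project before the workflows take over; a sketch of that one-off call (the token below is a placeholder):

import subprocess

subprocess.run(['python3', 'intl/initial_sync.py', '<crowdin-api-token>', 'mgba'],
               check=True)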

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
with open('intl/upload_workflow.py', 'r') as workflow:
workflow_config = workflow.read()
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/core_option_translation.py', dir_path, core_name])",
"subprocess.run(['python3', 'intl/crowdin_prep.py', dir_path, core_name])"
)
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/initial_sync.py', api_key, core_name])",
"subprocess.run(['python3', 'intl/crowdin_source_upload.py', api_key, core_name])"
)
with open('intl/upload_workflow.py', 'w') as workflow:
workflow.write(workflow_config)
with open('intl/download_workflow.py', 'r') as workflow:
workflow_config = workflow.read()
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/core_option_translation.py', dir_path, core_name])",
"subprocess.run(['python3', 'intl/crowdin_prep.py', dir_path, core_name])"
)
workflow_config = workflow_config.replace(
"subprocess.run(['python3', 'intl/initial_sync.py', api_key, core_name])",
"subprocess.run(['python3', 'intl/crowdin_translation_download.py', api_key, core_name])"
)
with open('intl/download_workflow.py', 'w') as workflow:
workflow.write(workflow_config)

15
intl/upload_workflow.py Normal file
View File

@@ -0,0 +1,15 @@
#!/usr/bin/env python3
import sys
import subprocess
try:
api_key = sys.argv[1]
core_name = sys.argv[2]
dir_path = sys.argv[3]
except IndexError as e:
print('Please provide path to libretro_core_options.h, Crowdin API Token and core name!')
raise e
subprocess.run(['python3', 'intl/crowdin_prep.py', dir_path, core_name])
subprocess.run(['python3', 'intl/crowdin_source_upload.py', api_key, core_name])

459
intl/v1_to_v2_converter.py Normal file
View File

@@ -0,0 +1,459 @@
#!/usr/bin/env python3
"""Core options v1 to v2 converter
Just run this script as follows to convert 'libretro_core_options.h' & 'libretro_core_options_intl.h' to v2:
python3 "/path/to/v1_to_v2_converter.py" "/path/to/where/libretro_core_options.h & libretro_core_options_intl.h/are"
The original files will be preserved as *.v1
"""
import core_option_regex as cor
import os
import sys
def create_v2_code_file(struct_text, file_name):
def replace_option(option_match):
_offset = option_match.start(0)
if option_match.group(3):
res = option_match.group(0)[:option_match.end(2) - _offset] + ',\n NULL' + \
option_match.group(0)[option_match.end(2) - _offset:option_match.end(3) - _offset] + \
'NULL,\n NULL,\n ' + option_match.group(0)[option_match.end(3) - _offset:]
else:
return option_match.group(0)
return res
comment_v1 = '/*\n' \
' ********************************\n' \
' * VERSION: 1.3\n' \
' ********************************\n' \
' *\n' \
' * - 1.3: Move translations to libretro_core_options_intl.h\n' \
' * - libretro_core_options_intl.h includes BOM and utf-8\n' \
' * fix for MSVC 2010-2013\n' \
' * - Added HAVE_NO_LANGEXTRA flag to disable translations\n' \
' * on platforms/compilers without BOM support\n' \
' * - 1.2: Use core options v1 interface when\n' \
' * RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION is >= 1\n' \
' * (previously required RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION == 1)\n' \
' * - 1.1: Support generation of core options v0 retro_core_option_value\n' \
' * arrays containing options with a single value\n' \
' * - 1.0: First commit\n' \
'*/\n'
comment_v2 = '/*\n' \
' ********************************\n' \
' * VERSION: 2.0\n' \
' ********************************\n' \
' *\n' \
' * - 2.0: Add support for core options v2 interface\n' \
' * - 1.3: Move translations to libretro_core_options_intl.h\n' \
' * - libretro_core_options_intl.h includes BOM and utf-8\n' \
' * fix for MSVC 2010-2013\n' \
' * - Added HAVE_NO_LANGEXTRA flag to disable translations\n' \
' * on platforms/compilers without BOM support\n' \
' * - 1.2: Use core options v1 interface when\n' \
' * RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION is >= 1\n' \
' * (previously required RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION == 1)\n' \
' * - 1.1: Support generation of core options v0 retro_core_option_value\n' \
' * arrays containing options with a single value\n' \
' * - 1.0: First commit\n' \
'*/\n'
p_intl = cor.p_intl
p_set = cor.p_set
new_set = 'static INLINE void libretro_set_core_options(retro_environment_t environ_cb,\n' \
' bool *categories_supported)\n' \
'{\n' \
' unsigned version = 0;\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' unsigned language = 0;\n' \
'#endif\n' \
'\n' \
' if (!environ_cb || !categories_supported)\n' \
' return;\n' \
'\n' \
' *categories_supported = false;\n' \
'\n' \
' if (!environ_cb(RETRO_ENVIRONMENT_GET_CORE_OPTIONS_VERSION, &version))\n' \
' version = 0;\n' \
'\n' \
' if (version >= 2)\n' \
' {\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' struct retro_core_options_v2_intl core_options_intl;\n' \
'\n' \
' core_options_intl.us = &options_us;\n' \
' core_options_intl.local = NULL;\n' \
'\n' \
' if (environ_cb(RETRO_ENVIRONMENT_GET_LANGUAGE, &language) &&\n' \
' (language < RETRO_LANGUAGE_LAST) && (language != RETRO_LANGUAGE_ENGLISH))\n' \
' core_options_intl.local = options_intl[language];\n' \
'\n' \
' *categories_supported = environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_V2_INTL,\n' \
' &core_options_intl);\n' \
'#else\n' \
' *categories_supported = environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_V2,\n' \
' &options_us);\n' \
'#endif\n' \
' }\n' \
' else\n' \
' {\n' \
' size_t i, j;\n' \
' size_t option_index = 0;\n' \
' size_t num_options = 0;\n' \
' struct retro_core_option_definition\n' \
' *option_v1_defs_us = NULL;\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' size_t num_options_intl = 0;\n' \
' struct retro_core_option_v2_definition\n' \
' *option_defs_intl = NULL;\n' \
' struct retro_core_option_definition\n' \
' *option_v1_defs_intl = NULL;\n' \
' struct retro_core_options_intl\n' \
' core_options_v1_intl;\n' \
'#endif\n' \
' struct retro_variable *variables = NULL;\n' \
' char **values_buf = NULL;\n' \
'\n' \
' /* Determine total number of options */\n' \
' while (true)\n' \
' {\n' \
' if (option_defs_us[num_options].key)\n' \
' num_options++;\n' \
' else\n' \
' break;\n' \
' }\n' \
'\n' \
' if (version >= 1)\n' \
' {\n' \
' /* Allocate US array */\n' \
' option_v1_defs_us = (struct retro_core_option_definition *)\n' \
' calloc(num_options + 1, sizeof(struct retro_core_option_definition));\n' \
'\n' \
' /* Copy parameters from option_defs_us array */\n' \
' for (i = 0; i < num_options; i++)\n' \
' {\n' \
' struct retro_core_option_v2_definition *option_def_us = &option_defs_us[i];\n' \
' struct retro_core_option_value *option_values = option_def_us->values;\n' \
' struct retro_core_option_definition *option_v1_def_us = &option_v1_defs_us[i];\n' \
' struct retro_core_option_value *option_v1_values = option_v1_def_us->values;\n' \
'\n' \
' option_v1_def_us->key = option_def_us->key;\n' \
' option_v1_def_us->desc = option_def_us->desc;\n' \
' option_v1_def_us->info = option_def_us->info;\n' \
' option_v1_def_us->default_value = option_def_us->default_value;\n' \
'\n' \
' /* Values must be copied individually... */\n' \
' while (option_values->value)\n' \
' {\n' \
' option_v1_values->value = option_values->value;\n' \
' option_v1_values->label = option_values->label;\n' \
'\n' \
' option_values++;\n' \
' option_v1_values++;\n' \
' }\n' \
' }\n' \
'\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' if (environ_cb(RETRO_ENVIRONMENT_GET_LANGUAGE, &language) &&\n' \
' (language < RETRO_LANGUAGE_LAST) && (language != RETRO_LANGUAGE_ENGLISH) &&\n' \
' options_intl[language])\n' \
' option_defs_intl = options_intl[language]->definitions;\n' \
'\n' \
' if (option_defs_intl)\n' \
' {\n' \
' /* Determine number of intl options */\n' \
' while (true)\n' \
' {\n' \
' if (option_defs_intl[num_options_intl].key)\n' \
' num_options_intl++;\n' \
' else\n' \
' break;\n' \
' }\n' \
'\n' \
' /* Allocate intl array */\n' \
' option_v1_defs_intl = (struct retro_core_option_definition *)\n' \
' calloc(num_options_intl + 1, sizeof(struct retro_core_option_definition));\n' \
'\n' \
' /* Copy parameters from option_defs_intl array */\n' \
' for (i = 0; i < num_options_intl; i++)\n' \
' {\n' \
' struct retro_core_option_v2_definition *option_def_intl = &option_defs_intl[i];\n' \
' struct retro_core_option_value *option_values = option_def_intl->values;\n' \
' struct retro_core_option_definition *option_v1_def_intl = &option_v1_defs_intl[i];\n' \
' struct retro_core_option_value *option_v1_values = option_v1_def_intl->values;\n' \
'\n' \
' option_v1_def_intl->key = option_def_intl->key;\n' \
' option_v1_def_intl->desc = option_def_intl->desc;\n' \
' option_v1_def_intl->info = option_def_intl->info;\n' \
' option_v1_def_intl->default_value = option_def_intl->default_value;\n' \
'\n' \
' /* Values must be copied individually... */\n' \
' while (option_values->value)\n' \
' {\n' \
' option_v1_values->value = option_values->value;\n' \
' option_v1_values->label = option_values->label;\n' \
'\n' \
' option_values++;\n' \
' option_v1_values++;\n' \
' }\n' \
' }\n' \
' }\n' \
'\n' \
' core_options_v1_intl.us = option_v1_defs_us;\n' \
' core_options_v1_intl.local = option_v1_defs_intl;\n' \
'\n' \
' environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS_INTL, &core_options_v1_intl);\n' \
'#else\n' \
' environ_cb(RETRO_ENVIRONMENT_SET_CORE_OPTIONS, option_v1_defs_us);\n' \
'#endif\n' \
' }\n' \
' else\n' \
' {\n' \
' /* Allocate arrays */\n' \
' variables = (struct retro_variable *)calloc(num_options + 1,\n' \
' sizeof(struct retro_variable));\n' \
' values_buf = (char **)calloc(num_options, sizeof(char *));\n' \
'\n' \
' if (!variables || !values_buf)\n' \
' goto error;\n' \
'\n' \
' /* Copy parameters from option_defs_us array */\n' \
' for (i = 0; i < num_options; i++)\n' \
' {\n' \
' const char *key = option_defs_us[i].key;\n' \
' const char *desc = option_defs_us[i].desc;\n' \
' const char *default_value = option_defs_us[i].default_value;\n' \
' struct retro_core_option_value *values = option_defs_us[i].values;\n' \
' size_t buf_len = 3;\n' \
' size_t default_index = 0;\n' \
'\n' \
' values_buf[i] = NULL;\n' \
'\n' \
' if (desc)\n' \
' {\n' \
' size_t num_values = 0;\n' \
'\n' \
' /* Determine number of values */\n' \
' while (true)\n' \
' {\n' \
' if (values[num_values].value)\n' \
' {\n' \
' /* Check if this is the default value */\n' \
' if (default_value)\n' \
' if (strcmp(values[num_values].value, default_value) == 0)\n' \
' default_index = num_values;\n' \
'\n' \
' buf_len += strlen(values[num_values].value);\n' \
' num_values++;\n' \
' }\n' \
' else\n' \
' break;\n' \
' }\n' \
'\n' \
' /* Build values string */\n' \
' if (num_values > 0)\n' \
' {\n' \
' buf_len += num_values - 1;\n' \
' buf_len += strlen(desc);\n' \
'\n' \
' values_buf[i] = (char *)calloc(buf_len, sizeof(char));\n' \
' if (!values_buf[i])\n' \
' goto error;\n' \
'\n' \
' strcpy(values_buf[i], desc);\n' \
' strcat(values_buf[i], "; ");\n' \
'\n' \
' /* Default value goes first */\n' \
' strcat(values_buf[i], values[default_index].value);\n' \
'\n' \
' /* Add remaining values */\n' \
' for (j = 0; j < num_values; j++)\n' \
' {\n' \
' if (j != default_index)\n' \
' {\n' \
' strcat(values_buf[i], "|");\n' \
' strcat(values_buf[i], values[j].value);\n' \
' }\n' \
' }\n' \
' }\n' \
' }\n' \
'\n' \
' variables[option_index].key = key;\n' \
' variables[option_index].value = values_buf[i];\n' \
' option_index++;\n' \
' }\n' \
'\n' \
' /* Set variables */\n' \
' environ_cb(RETRO_ENVIRONMENT_SET_VARIABLES, variables);\n' \
' }\n' \
'\n' \
'error:\n' \
' /* Clean up */\n' \
'\n' \
' if (option_v1_defs_us)\n' \
' {\n' \
' free(option_v1_defs_us);\n' \
' option_v1_defs_us = NULL;\n' \
' }\n' \
'\n' \
'#ifndef HAVE_NO_LANGEXTRA\n' \
' if (option_v1_defs_intl)\n' \
' {\n' \
' free(option_v1_defs_intl);\n' \
' option_v1_defs_intl = NULL;\n' \
' }\n' \
'#endif\n' \
'\n' \
' if (values_buf)\n' \
' {\n' \
' for (i = 0; i < num_options; i++)\n' \
' {\n' \
' if (values_buf[i])\n' \
' {\n' \
' free(values_buf[i]);\n' \
' values_buf[i] = NULL;\n' \
' }\n' \
' }\n' \
'\n' \
' free(values_buf);\n' \
' values_buf = NULL;\n' \
' }\n' \
'\n' \
' if (variables)\n' \
' {\n' \
' free(variables);\n' \
' variables = NULL;\n' \
' }\n' \
' }\n' \
'}\n' \
'\n' \
'#ifdef __cplusplus\n' \
'}\n' \
'#endif'
struct_groups = cor.p_struct.finditer(struct_text)
out_text = struct_text
for construct in struct_groups:
repl_text = ''
declaration = construct.group(1)
struct_match = cor.p_type_name.search(declaration)
if struct_match:
if struct_match.group(3):
struct_type_name_lang = struct_match.group(1, 2, 3)
declaration_end = declaration[struct_match.end(1):]
elif struct_match.group(4):
struct_type_name_lang = struct_match.group(1, 2, 4)
declaration_end = declaration[struct_match.end(1):]
else:
struct_type_name_lang = sum((struct_match.group(1, 2), ('_us',)), ())
declaration_end = f'{declaration[struct_match.end(1):struct_match.end(2)]}_us' \
f'{declaration[struct_match.end(2):]}'
else:
return -1
if 'retro_core_option_definition' == struct_type_name_lang[0]:
import shutil
shutil.copy(file_name, file_name + '.v1')
new_declaration = f'\nstruct retro_core_option_v2_category option_cats{struct_type_name_lang[2]}[] = ' \
'{\n { NULL, NULL, NULL },\n' \
'};\n\n' \
+ declaration[:struct_match.start(1)] + \
'retro_core_option_v2_definition' \
+ declaration_end
offset = construct.start(0)
repl_text = repl_text + cor.re.sub(cor.re.escape(declaration), new_declaration,
construct.group(0)[:construct.start(2) - offset])
content = construct.group(2)
new_content = cor.p_option.sub(replace_option, content)
repl_text = repl_text + new_content + cor.re.sub(r'{\s*NULL,\s*NULL,\s*NULL,\s*{\{0}},\s*NULL\s*},\s*};',
'{ NULL, NULL, NULL, NULL, NULL, NULL, {{0}}, NULL },\n};'
'\n\nstruct retro_core_options_v2 options' +
struct_type_name_lang[2] + ' = {\n'
f' option_cats{struct_type_name_lang[2]},\n'
f' option_defs{struct_type_name_lang[2]}\n'
'};',
construct.group(0)[construct.end(2) - offset:])
out_text = cor.re.sub(cor.re.escape(construct.group(0)), repl_text, out_text)
else:
return -2
with open(file_name, 'w', encoding='utf-8') as code_file:
out_text = cor.re.sub(cor.re.escape(comment_v1), comment_v2, out_text)
intl = p_intl.search(out_text)
if intl:
new_intl = out_text[:intl.start(1)] \
+ 'struct retro_core_options_v2 *options_intl[RETRO_LANGUAGE_LAST]' \
+ out_text[intl.end(1):intl.start(2)] \
+ '\n &options_us, /* RETRO_LANGUAGE_ENGLISH */\n' \
' &options_ja, /* RETRO_LANGUAGE_JAPANESE */\n' \
' &options_fr, /* RETRO_LANGUAGE_FRENCH */\n' \
' &options_es, /* RETRO_LANGUAGE_SPANISH */\n' \
' &options_de, /* RETRO_LANGUAGE_GERMAN */\n' \
' &options_it, /* RETRO_LANGUAGE_ITALIAN */\n' \
' &options_nl, /* RETRO_LANGUAGE_DUTCH */\n' \
' &options_pt_br, /* RETRO_LANGUAGE_PORTUGUESE_BRAZIL */\n' \
' &options_pt_pt, /* RETRO_LANGUAGE_PORTUGUESE_PORTUGAL */\n' \
' &options_ru, /* RETRO_LANGUAGE_RUSSIAN */\n' \
' &options_ko, /* RETRO_LANGUAGE_KOREAN */\n' \
' &options_cht, /* RETRO_LANGUAGE_CHINESE_TRADITIONAL */\n' \
' &options_chs, /* RETRO_LANGUAGE_CHINESE_SIMPLIFIED */\n' \
' &options_eo, /* RETRO_LANGUAGE_ESPERANTO */\n' \
' &options_pl, /* RETRO_LANGUAGE_POLISH */\n' \
' &options_vn, /* RETRO_LANGUAGE_VIETNAMESE */\n' \
' &options_ar, /* RETRO_LANGUAGE_ARABIC */\n' \
' &options_el, /* RETRO_LANGUAGE_GREEK */\n' \
' &options_tr, /* RETRO_LANGUAGE_TURKISH */\n' \
' &options_sv, /* RETRO_LANGUAGE_SLOVAK */\n' \
' &options_fa, /* RETRO_LANGUAGE_PERSIAN */\n' \
' &options_he, /* RETRO_LANGUAGE_HEBREW */\n' \
' &options_ast, /* RETRO_LANGUAGE_ASTURIAN */\n' \
' &options_fi, /* RETRO_LANGUAGE_FINNISH */\n' \
+ out_text[intl.end(2):]
out_text = p_set.sub(new_set, new_intl)
else:
out_text = p_set.sub(new_set, out_text)
code_file.write(out_text)
return 1
# -------------------- MAIN -------------------- #
if __name__ == '__main__':
try:
if os.path.isfile(sys.argv[1]):
_temp = os.path.dirname(sys.argv[1])
else:
_temp = sys.argv[1]
while _temp.endswith('/') or _temp.endswith('\\'):
_temp = _temp[:-1]
DIR_PATH = _temp
except IndexError:
DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print("No path provided, assuming parent directory:\n" + DIR_PATH)
H_FILE_PATH = os.path.join(DIR_PATH, 'libretro_core_options.h')
INTL_FILE_PATH = os.path.join(DIR_PATH, 'libretro_core_options_intl.h')
for file in (H_FILE_PATH, INTL_FILE_PATH):
if os.path.isfile(file):
with open(file, 'r+', encoding='utf-8') as h_file:
text = h_file.read()
try:
test = create_v2_code_file(text, file)
except Exception as e:
print(e)
test = -1
if -1 > test:
print('Your file looks like it already is v2? (' + file + ')')
continue
if 0 > test:
print('An error occurred! Please make sure to use the complete v1 struct! (' + file + ')')
continue
else:
print(file + ' not found.')
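
For orientation, what the converter does to a single option entry, shown on an invented example; the inserted NULLs are the new v2 fields (desc_categorized after the label, then info_categorized and category after the info string):

# Editor's sketch; fields invented for illustration, whitespace approximate.
#
# v1:  { "mycore_frameskip", "Frameskip",
#        "Skip frames to avoid audio buffer under-run (crackling).",
#        { { "disabled", NULL }, { "auto", "Auto" }, { NULL, NULL } }, "disabled" },
#
# v2:  { "mycore_frameskip", "Frameskip",
#        NULL,
#        "Skip frames to avoid audio buffer under-run (crackling).",
#        NULL,
#        NULL,
#        { { "disabled", NULL }, { "auto", "Auto" }, { NULL, NULL } }, "disabled" },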

View File

@@ -72,7 +72,7 @@ struct retro_core_option_v2_category option_cats_us[] = {
{
"input",
"Input & Auxiliary Devices",
- "Configure gamepad / sensor input and gamepad rumble settings."
+ "Configure controller / sensor input and controller rumble settings."
},
{
"performance",
@@ -387,29 +387,29 @@ struct retro_core_options_v2 options_us = {
#ifndef HAVE_NO_LANGEXTRA
struct retro_core_options_v2 *options_intl[RETRO_LANGUAGE_LAST] = {
&options_us, /* RETRO_LANGUAGE_ENGLISH */
- NULL, /* RETRO_LANGUAGE_JAPANESE */
- NULL, /* RETRO_LANGUAGE_FRENCH */
+ &options_ja, /* RETRO_LANGUAGE_JAPANESE */
+ &options_fr, /* RETRO_LANGUAGE_FRENCH */
&options_es, /* RETRO_LANGUAGE_SPANISH */
- NULL, /* RETRO_LANGUAGE_GERMAN */
+ &options_de, /* RETRO_LANGUAGE_GERMAN */
&options_it, /* RETRO_LANGUAGE_ITALIAN */
- NULL, /* RETRO_LANGUAGE_DUTCH */
- NULL, /* RETRO_LANGUAGE_PORTUGUESE_BRAZIL */
- NULL, /* RETRO_LANGUAGE_PORTUGUESE_PORTUGAL */
- NULL, /* RETRO_LANGUAGE_RUSSIAN */
- NULL, /* RETRO_LANGUAGE_KOREAN */
- NULL, /* RETRO_LANGUAGE_CHINESE_TRADITIONAL */
- NULL, /* RETRO_LANGUAGE_CHINESE_SIMPLIFIED */
- NULL, /* RETRO_LANGUAGE_ESPERANTO */
- NULL, /* RETRO_LANGUAGE_POLISH */
- NULL, /* RETRO_LANGUAGE_VIETNAMESE */
- NULL, /* RETRO_LANGUAGE_ARABIC */
- NULL, /* RETRO_LANGUAGE_GREEK */
+ &options_nl, /* RETRO_LANGUAGE_DUTCH */
+ &options_pt_br, /* RETRO_LANGUAGE_PORTUGUESE_BRAZIL */
+ &options_pt_pt, /* RETRO_LANGUAGE_PORTUGUESE_PORTUGAL */
+ &options_ru, /* RETRO_LANGUAGE_RUSSIAN */
+ &options_ko, /* RETRO_LANGUAGE_KOREAN */
+ &options_cht, /* RETRO_LANGUAGE_CHINESE_TRADITIONAL */
+ &options_chs, /* RETRO_LANGUAGE_CHINESE_SIMPLIFIED */
+ &options_eo, /* RETRO_LANGUAGE_ESPERANTO */
+ &options_pl, /* RETRO_LANGUAGE_POLISH */
+ &options_vn, /* RETRO_LANGUAGE_VIETNAMESE */
+ &options_ar, /* RETRO_LANGUAGE_ARABIC */
+ &options_el, /* RETRO_LANGUAGE_GREEK */
&options_tr, /* RETRO_LANGUAGE_TURKISH */
- NULL, /* RETRO_LANGUAGE_SLOVAK */
- NULL, /* RETRO_LANGUAGE_PERSIAN */
- NULL, /* RETRO_LANGUAGE_HEBREW */
- NULL, /* RETRO_LANGUAGE_ASTURIAN */
- NULL, /* RETRO_LANGUAGE_FINNISH */
+ &options_sv, /* RETRO_LANGUAGE_SLOVAK */
+ &options_fa, /* RETRO_LANGUAGE_PERSIAN */
+ &options_he, /* RETRO_LANGUAGE_HEBREW */
+ &options_ast, /* RETRO_LANGUAGE_ASTURIAN */
+ &options_fi, /* RETRO_LANGUAGE_FINNISH */
};
#endif

File diff suppressed because it is too large