Fix Crowdin config & workflow

Also add new languages and update translation scripts
DisasterMo 2023-01-02 19:47:36 +01:00
parent 3c990add6f
commit fdf7fc2870
15 changed files with 8628 additions and 1714 deletions

intl/activate.py (Executable file → Normal file, 2 changes)

@@ -62,7 +62,7 @@ if __name__ == '__main__':
     translate_txt = translate_txt.replace('<0-59>', f"{minutes}")
     translate_txt = translate_txt.replace('<0-23>', f"{hour}")
     translate_txt = translate_txt.replace('# Fridays at , UTC',
-                                          f"# Fridays at {hour%12}:{minutes} {'AM' if hour < 12 else 'PM'}, UTC")
+                                          f"# Fridays at {hour%12}:{minutes if minutes > 9 else '0' + str(minutes)} {'AM' if hour < 12 else 'PM'}, UTC")
     translate_txt = translate_txt.replace("<CORE_NAME>", CORE_NAME)
     translate_txt = translate_txt.replace('<PATH/TO>/libretro_core_options_intl.h',
                                           core_intl_file)
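The only functional change here is zero-padding the minutes in the generated workflow comment. A minimal sketch of that behaviour with assumed example values; Python's 02d format spec would be an equivalent, shorter form:

    hour, minutes = 7, 5                                      # assumed example values
    padded = minutes if minutes > 9 else '0' + str(minutes)   # '05', as in the diff
    print(f"# Fridays at {hour % 12}:{padded} {'AM' if hour < 12 else 'PM'}, UTC")
    print(f"# Fridays at {hour % 12}:{minutes:02d} {'AM' if hour < 12 else 'PM'}, UTC")  # same output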

intl/core_option_regex.py

@@ -1,14 +1,14 @@
 import re
-# 0: full struct; 1: up to & including first []; 2: content between first {}
-p_struct = re.compile(r'(struct\s*[a-zA-Z0-9_\s]+\[])\s*'
-                      r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+)\s*)*'
+# 0: full struct; 1: up to & including first []; 2 & 3: comments; 4: content between first {}
+p_struct = re.compile(r'(\bstruct\b\s*[a-zA-Z0-9_\s]+\[])\s*'  # 1st capturing group
+                      r'(?:(?=(\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+))\2\s*)*'  # 2nd capturing group
                       r'=\s*'  # =
-                      r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+)\s*)*'
+                      r'(?:(?=(\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+))\3\s*)*'  # 3rd capturing group
                       r'{((?:.|[\r\n])*?)\{\s*NULL,\s*NULL,\s*NULL\s*(?:.|[\r\n])*?},?(?:.|[\r\n])*?};')  # captures full struct, it's beginning and it's content
 # 0: type name[]; 1: type; 2: name
-p_type_name = re.compile(r'(retro_core_option_[a-zA-Z0-9_]+)\s*'
-                         r'(option_cats([a-z_]{0,8})|option_defs([a-z_]{0,8}))\s*\[]')
+p_type_name = re.compile(r'(\bretro_core_option_[a-zA-Z0-9_]+)\s*'
+                         r'(\boption_cats([a-z_]{0,8})|\boption_defs([a-z_]*))\s*\[]')
 # 0: full option; 1: key; 2: description; 3: additional info; 4: key/value pairs
 p_option = re.compile(r'{\s*'  # opening braces
                       r'(?:(?:\/\*(?:.|[\r\n])*?\*\/|\/\/.*[\r\n]+|#.*[\r\n]+)\s*)*'
@@ -76,9 +76,9 @@ p_key_value = re.compile(r'{\s*'  # opening braces
 p_masked = re.compile(r'([A-Z_][A-Z0-9_]+)\s*(\"(?:"\s*"|\\\s*|.)*\")')
-p_intl = re.compile(r'(struct retro_core_option_definition \*option_defs_intl\[RETRO_LANGUAGE_LAST]) = {'
+p_intl = re.compile(r'(\bstruct retro_core_option_definition \*option_defs_intl\[RETRO_LANGUAGE_LAST]) = {'
                     r'((?:.|[\r\n])*?)};')
-p_set = re.compile(r'static INLINE void libretro_set_core_options\(retro_environment_t environ_cb\)'
+p_set = re.compile(r'\bstatic INLINE void libretro_set_core_options\(retro_environment_t environ_cb\)'
                    r'(?:.|[\r\n])*?};?\s*#ifdef __cplusplus\s*}\s*#endif')
 p_yaml = re.compile(r'"project_id": "[0-9]+".*\s*'
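The lookahead-plus-backreference form ((?=(...))\2) keeps each comment block in its own group, so the option content of p_struct is now group 4 instead of group 2, which is why the consumers below switch from struct.group(2) to struct.group(4). A quick sanity check of the new grouping, assuming the updated intl/core_option_regex.py is importable and using a toy struct:

    import core_option_regex as cor   # assumes the updated module is on the path

    sample = '''struct retro_core_option_v2_definition option_defs_us[] = {
       { "mycore_opt", "Label", NULL, "Info.", NULL, "cat", { { "on", NULL }, { NULL, NULL } }, "on" },
       { NULL, NULL, NULL, NULL, NULL, NULL, {{0}}, NULL },
    };'''

    m = cor.p_struct.search(sample)
    print(m.group(1))  # 'struct retro_core_option_v2_definition option_defs_us[]'
    print(m.group(4))  # the option entries between the outer braces (the old group 2)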

intl/core_option_translation.py (Executable file → Normal file, 79 changes)

@@ -134,13 +134,12 @@ def is_viable_non_dupe(text: str, comparison) -> bool:
 def is_viable_value(text: str) -> bool:
-    """text must be longer than 2 ('""'), not 'NULL' and text.lower() not in
-    {'"enabled"', '"disabled"', '"true"', '"false"', '"on"', '"off"'}.
+    """text must be longer than 2 ('""') and not 'NULL'.

     :param text: String to be tested.
     :return: bool
     """
-    return 2 < len(text) and text != 'NULL' and text.lower() not in ON_OFFS
+    return 2 < len(text) and text != 'NULL'


 def create_non_dupe(base_name: str, opt_num: int, comparison) -> str:
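The on/off style check is removed from this helper and re-applied later, where key/value pairs are collected (see the group 4 hunk further down). A quick check of the narrowed helper:

    def is_viable_value(text: str) -> bool:   # as defined above
        return 2 < len(text) and text != 'NULL'

    print(is_viable_value('"enabled"'))  # True; before this commit the ON_OFFS check made this False
    print(is_viable_value('NULL'))       # False
    print(is_viable_value('""'))         # False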
@@ -183,17 +182,17 @@ def get_texts(text: str) -> dict:
         if lang not in just_string:
             hash_n_string[lang] = {}
             just_string[lang] = set()
-        is_v2 = False
+        is_v2_definition = 'retro_core_option_v2_definition' == struct_type_name[0]
         pre_name = ''
+        # info texts format
         p = cor.p_info
-        if 'retro_core_option_v2_definition' == struct_type_name[0]:
-            is_v2 = True
-        elif 'retro_core_option_v2_category' == struct_type_name[0]:
+        if 'retro_core_option_v2_category' == struct_type_name[0]:
+            # prepend category labels, as they can be the same as option labels
             pre_name = 'CATEGORY_'
+            # categories have different info texts format
             p = cor.p_info_cat
-        struct_content = struct.group(2)
+        struct_content = struct.group(4)
         # 0: full option; 1: key; 2: description; 3: additional info; 4: key/value pairs
         struct_options = cor.p_option.finditer(struct_content)
         for opt, option in enumerate(struct_options):
@@ -219,7 +218,7 @@ def get_texts(text: str) -> dict:
             if option.group(3):
                 infos = option.group(3)
                 option_info = p.finditer(infos)
-                if is_v2:
+                if is_v2_definition:
                     desc1 = next(option_info).group(1)
                     if is_viable_non_dupe(desc1, just_string[lang]):
                         just_string[lang].add(desc1)
@@ -248,16 +247,21 @@ def get_texts(text: str) -> dict:
                 else:
                     raise ValueError(f'Too few arguments in struct {struct_type_name[1]} option {option.group(1)}!')

-            # group 4:
+            # group 4: key/value pairs
             if option.group(4):
                 for j, kv_set in enumerate(cor.p_key_value.finditer(option.group(4))):
                     set_key, set_value = kv_set.group(1, 2)
                     if not is_viable_value(set_value):
-                        if not is_viable_value(set_key):
-                            continue
+                        # use the key if value not available
                         set_value = set_key
+                        if not is_viable_value(set_value):
+                            continue
                     # re.fullmatch(r'(?:[+-][0-9]+)+', value[1:-1])
-                    if set_value not in just_string[lang] and not re.sub(r'[+-]', '', set_value[1:-1]).isdigit():
+                    # add only if non-dupe, not translated by RetroArch directly & not purely numeric
+                    if set_value not in just_string[lang]\
+                            and set_value.lower() not in ON_OFFS\
+                            and not re.sub(r'[+-]', '', set_value[1:-1]).isdigit():
                         clean_key = set_key[1:-1]
                         clean_key = remove_special_chars(clean_key).upper().replace(' ', '_')
                         m_h = create_non_dupe(re.sub(r'__+', '_', f"OPTION_VAL_{clean_key}"), opt, hash_n_string[lang])
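A toy run of the relocated filter; ON_OFFS is assumed to be the set the old is_viable_value docstring listed, i.e. values RetroArch already translates on its own:

    import re

    ON_OFFS = {'"enabled"', '"disabled"', '"true"', '"false"', '"on"', '"off"'}  # assumed definition
    seen = set()
    for set_value in ('"enabled"', '"4:3"', '"+10"', '"High"', '"High"'):
        if set_value not in seen \
                and set_value.lower() not in ON_OFFS \
                and not re.sub(r'[+-]', '', set_value[1:-1]).isdigit():
            seen.add(set_value)
    print(seen)  # only '"4:3"' and '"High"' survive; on/off style, numeric and duplicate values are skipped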
@@ -298,8 +302,12 @@ def h2json(file_paths: dict) -> dict:
     for file_lang in file_paths:
         if not os.path.isfile(file_paths[file_lang]):
             continue
-        jsons[file_lang] = file_paths[file_lang][:-2] + '.json'
+        file_path = file_paths[file_lang]
+        try:
+            jsons[file_lang] = file_path[:file_path.rindex('.')] + '.json'
+        except ValueError:
+            print(f"File {file_path} has incorrect format! File ending missing?")
+            continue

         p = cor.p_masked
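The old [:-2] slice silently assumed a two-character '.h' suffix; str.rindex works for any extension and raises ValueError when there is none, which the new try/except turns into a warning. A small sketch with assumed paths:

    file_path = 'intl/mycore/_us.h'                          # assumed example path
    print(file_path[:file_path.rindex('.')] + '.json')       # intl/mycore/_us.json

    try:
        'no_extension'[:'no_extension'.rindex('.')]
    except ValueError:
        print('File no_extension has incorrect format! File ending missing?')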
@@ -397,11 +405,11 @@ def get_crowdin_client(dir_path: str) -> str:
     return jar_path


-def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str, file_path: str) -> None:
+def create_intl_file(intl_file_path: str, localisations_path: str, text: str, file_path: str) -> None:
     """Creates 'libretro_core_options_intl.h' from Crowdin translations.

-    :param localisation_file_path: Path to 'libretro_core_options_intl.h'
-    :param intl_dir_path: Path to the intl/<core_name> directory.
+    :param intl_file_path: Path to 'libretro_core_options_intl.h'
+    :param localisations_path: Path to the intl/<core_name> directory.
     :param text: Content of the 'libretro_core_options.h' being translated.
     :param file_path: Path to the '_us.h' file, containing the original English texts.
     :return: None
@@ -497,10 +505,11 @@ def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str,
               'extern "C" {\n' \
               '#endif\n'

-    if os.path.isfile(localisation_file_path):
+    if os.path.isfile(intl_file_path):
         # copy top of the file for re-use
-        with open(localisation_file_path, 'r', encoding='utf-8') as intl:  # libretro_core_options_intl.h
+        with open(intl_file_path, 'r', encoding='utf-8') as intl:  # libretro_core_options_intl.h
             in_text = intl.read()
+            # attempt 1: find the distinct comment header
             intl_start = re.search(re.escape('/*\n'
                                              ' ********************************\n'
                                              ' * Core Option Definitions\n'
@@ -509,19 +518,22 @@ def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str,
             if intl_start:
                 out_txt = in_text[:intl_start.end(0)]
             else:
+                # attempt 2: if no comment header present, find c++ compiler instruction (it is kind of a must)
                 intl_start = re.search(re.escape('#ifdef __cplusplus\n'
                                                  'extern "C" {\n'
                                                  '#endif\n'), in_text)
                 if intl_start:
                     out_txt = in_text[:intl_start.end(0)]
+                # if all attempts fail, use default from above

     # only write to file, if there is anything worthwhile to write!
     overwrite = False

     # iterate through localisation files
     files = {}
-    for file in os.scandir(intl_dir_path):
+    for file in os.scandir(localisations_path):
         files[file.name] = {'is_file': file.is_file(), 'path': file.path}

     for file in sorted(files):  # intl/<core_name>/_*
         if files[file]['is_file'] \
                 and file.startswith('_') \
@@ -532,6 +544,7 @@ def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str,
             struct_groups = cor.p_struct.finditer(text)
             lang_low = os.path.splitext(file)[0].lower()
             lang_up = lang_low.upper()
+            # mark each language's section with a comment, for readability
             out_txt = out_txt + f'/* RETRO_LANGUAGE{lang_up} */\n\n'  # /* RETRO_LANGUAGE_NM */
             # copy adjusted translations (makros)
@@ -544,22 +557,22 @@ def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str,
                 if 3 > len(struct_type_name):  # no language specifier
                     new_decl = re.sub(re.escape(struct_type_name[1]), struct_type_name[1] + lang_low, declaration)
                 else:
-                    new_decl = re.sub(re.escape(struct_type_name[2]), lang_low, declaration)
                     if '_us' != struct_type_name[2]:
+                        # only use _us constructs - other languages present in the source file are not important
                         continue
+                    new_decl = re.sub(re.escape(struct_type_name[2]), lang_low, declaration)

-                p = cor.p_info
-                if 'retro_core_option_v2_category' == struct_type_name[0]:
-                    p = cor.p_info_cat
+                p = (cor.p_info_cat if 'retro_core_option_v2_category' == struct_type_name[0] else cor.p_info)

                 offset_construct = construct.start(0)
+                # append localised construct name and ' = {'
                 start = construct.end(1) - offset_construct
-                end = construct.start(2) - offset_construct
+                end = construct.start(4) - offset_construct
                 out_txt = out_txt + new_decl + construct.group(0)[start:end]

-                content = construct.group(2)
+                # insert macros
+                content = construct.group(4)
                 new_content = cor.p_option.sub(replace_option, content)
-                start = construct.end(2) - offset_construct
+                start = construct.end(4) - offset_construct
+                # append macro-filled content and close the construct
                 out_txt = out_txt + new_content + construct.group(0)[start:] + '\n'

                 # for v2
@@ -574,7 +587,7 @@ def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str,
     # only write to file, if there is anything worthwhile to write!
     if overwrite:
-        with open(localisation_file_path, 'w', encoding='utf-8') as intl:
+        with open(intl_file_path, 'w', encoding='utf-8') as intl:
             intl.write(out_txt + '\n#ifdef __cplusplus\n'
                        '}\n#endif\n'
                        '\n#endif')
@@ -585,7 +598,7 @@ def create_intl_file(localisation_file_path: str, intl_dir_path: str, text: str,
 if __name__ == '__main__':
     try:
-        if os.path.isfile(sys.argv[1]):
+        if os.path.isfile(sys.argv[1]) or sys.argv[1].endswith('.h'):
             _temp = os.path.dirname(sys.argv[1])
         else:
             _temp = sys.argv[1]
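With the added endswith('.h') test, an argument that merely looks like a header path is treated as one even if the file does not exist yet. A sketch with a hypothetical argument:

    import os

    arg = 'path/to/core/libretro_core_options.h'   # hypothetical sys.argv[1]
    _temp = os.path.dirname(arg) if (os.path.isfile(arg) or arg.endswith('.h')) else arg
    print(_temp)  # 'path/to/core', even though the file is not present locally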

intl/crowdin.yaml (Crowdin configuration template)

@@ -6,8 +6,8 @@
 "files":
   [
     {
-      "source": "/intl/_core_name_/_us.json",
+      "source": "/_core_name_/_us.json",
       "dest": "/_core_name_/_core_name_.json",
-      "translation": "/intl/_core_name_/_%two_letters_code%.json",
+      "translation": "/_core_name_/_%two_letters_code%.json",
     },
   ]
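For reference, a hypothetical expansion of the placeholders for a core named mycore: %two_letters_code% is a standard Crowdin placeholder expanded by Crowdin per target language, while _core_name_ is assumed to be filled in by the setup scripts.

    entry = {
        "source": "/_core_name_/_us.json",
        "translation": "/_core_name_/_%two_letters_code%.json",
    }
    core_name = "mycore"                                   # hypothetical core
    resolved = {k: v.replace("_core_name_", core_name) for k, v in entry.items()}
    print(resolved["source"])       # /mycore/_us.json
    print(resolved["translation"])  # /mycore/_%two_letters_code%.json; Crowdin fills in the language code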

intl/crowdin_prep.py (Executable file → Normal file, 2 changes)

@@ -4,7 +4,7 @@ import core_option_translation as t
 if __name__ == '__main__':
     try:
-        if t.os.path.isfile(t.sys.argv[1]):
+        if t.os.path.isfile(t.sys.argv[1]) or t.sys.argv[1].endswith('.h'):
             _temp = t.os.path.dirname(t.sys.argv[1])
         else:
             _temp = t.sys.argv[1]

intl/crowdin_source_upload.py (Executable file → Normal file, mode change only)

intl/crowdin_translate.py (Executable file → Normal file, 2 changes)

@@ -4,7 +4,7 @@ import core_option_translation as t
 if __name__ == '__main__':
     try:
-        if t.os.path.isfile(t.sys.argv[1]):
+        if t.os.path.isfile(t.sys.argv[1]) or t.sys.argv[1].endswith('.h'):
             _temp = t.os.path.dirname(t.sys.argv[1])
         else:
             _temp = t.sys.argv[1]

intl/crowdin_translation_download.py (Executable file → Normal file, mode change only)

intl/download_workflow.py (Executable file → Normal file, mode change only)

intl/initial_sync.py (Executable file → Normal file, mode change only)

intl/remove_initial_cycle.py (Executable file → Normal file, mode change only)

intl/upload_workflow.py (Executable file → Normal file, mode change only)

intl/v1_to_v2_converter.py (Executable file → Normal file, 15 changes)

@@ -9,7 +9,6 @@ The original files will be preserved as *.v1
 """
 import core_option_regex as cor
 import os
-import sys
 import glob
@@ -380,7 +379,8 @@ def create_v2_code_file(struct_text, file_name):
                                   f' option_defs{struct_type_name_lang[2]}\n'
                                   '};',
                                   construct.group(0)[construct.end(2) - offset:])
-            out_text = cor.re.sub(cor.re.escape(construct.group(0)), repl_text, out_text)
+            out_text = out_text.replace(construct.group(0), repl_text)
+            #out_text = cor.re.sub(cor.re.escape(construct.group(0)), repl_text, raw_out)
         else:
             return -2

     with open(file_name, 'w', encoding='utf-8') as code_file:
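One likely reason for switching to plain str.replace: the struct text is matched verbatim anyway, and re.sub would additionally interpret backslash sequences in repl_text as group references. A minimal illustration with toy strings, not the converter's actual data:

    import re

    text = 'before STRUCT after'
    construct = 'STRUCT'
    repl_text = r'macro \1 text'                       # backslash data, not a regex replacement

    print(text.replace(construct, repl_text))          # 'before macro \1 text after'
    try:
        re.sub(re.escape(construct), repl_text, text)  # the old approach
    except re.error as err:
        print(err)                                     # invalid group reference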
@@ -409,11 +409,19 @@ def create_v2_code_file(struct_text, file_name):
             ' &options_ar, /* RETRO_LANGUAGE_ARABIC */\n' \
             ' &options_el, /* RETRO_LANGUAGE_GREEK */\n' \
             ' &options_tr, /* RETRO_LANGUAGE_TURKISH */\n' \
-            ' &options_sv, /* RETRO_LANGUAGE_SLOVAK */\n' \
+            ' &options_sk, /* RETRO_LANGUAGE_SLOVAK */\n' \
             ' &options_fa, /* RETRO_LANGUAGE_PERSIAN */\n' \
             ' &options_he, /* RETRO_LANGUAGE_HEBREW */\n' \
             ' &options_ast, /* RETRO_LANGUAGE_ASTURIAN */\n' \
             ' &options_fi, /* RETRO_LANGUAGE_FINNISH */\n' \
+            ' &options_id, /* RETRO_LANGUAGE_INDONESIAN */\n' \
+            ' &options_sv, /* RETRO_LANGUAGE_SWEDISH */\n' \
+            ' &options_uk, /* RETRO_LANGUAGE_UKRAINIAN */\n' \
+            ' &options_cs, /* RETRO_LANGUAGE_CZECH */\n' \
+            ' &options_val, /* RETRO_LANGUAGE_CATALAN_VALENCIA */\n' \
+            ' &options_ca, /* RETRO_LANGUAGE_CATALAN */\n' \
+            ' &options_en, /* RETRO_LANGUAGE_BRITISH_ENGLISH */\n' \
+            ' &options_hu, /* RETRO_LANGUAGE_HUNGARIAN */\n' \
             + out_text[intl.end(2):]
         out_text = p_set.sub(new_set, new_intl)
     else:
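Besides the eight new entries, the slot for RETRO_LANGUAGE_SLOVAK now points at options_sk instead of the mislabelled options_sv, freeing the sv suffix for Swedish. The new array suffixes line up with the RETRO_LANGUAGE_* values added to libretro.h further down; as a quick reference, with values taken from this commit:

    # suffix of the options_<xx> array -> RETRO_LANGUAGE_* value (from libretro.h below)
    NEW_LANGUAGES = {
        'id':  24,  # RETRO_LANGUAGE_INDONESIAN
        'sv':  25,  # RETRO_LANGUAGE_SWEDISH
        'uk':  26,  # RETRO_LANGUAGE_UKRAINIAN
        'cs':  27,  # RETRO_LANGUAGE_CZECH
        'val': 28,  # RETRO_LANGUAGE_CATALAN_VALENCIA
        'ca':  29,  # RETRO_LANGUAGE_CATALAN
        'en':  30,  # RETRO_LANGUAGE_BRITISH_ENGLISH
        'hu':  31,  # RETRO_LANGUAGE_HUNGARIAN
    }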
@@ -456,7 +464,6 @@ if __name__ == '__main__':
     H_FILE_PATH = core_op_file
     INTL_FILE_PATH = core_op_file.replace("libretro_core_options.h", 'libretro_core_options_intl.h')
-
     for file in (H_FILE_PATH, INTL_FILE_PATH):
         if os.path.isfile(file):
             with open(file, 'r+', encoding='utf-8') as h_file:

libretro.h

@@ -283,6 +283,14 @@ enum retro_language
    RETRO_LANGUAGE_HEBREW = 21,
    RETRO_LANGUAGE_ASTURIAN = 22,
    RETRO_LANGUAGE_FINNISH = 23,
+   RETRO_LANGUAGE_INDONESIAN = 24,
+   RETRO_LANGUAGE_SWEDISH = 25,
+   RETRO_LANGUAGE_UKRAINIAN = 26,
+   RETRO_LANGUAGE_CZECH = 27,
+   RETRO_LANGUAGE_CATALAN_VALENCIA = 28,
+   RETRO_LANGUAGE_CATALAN = 29,
+   RETRO_LANGUAGE_BRITISH_ENGLISH = 30,
+   RETRO_LANGUAGE_HUNGARIAN = 31,
    RETRO_LANGUAGE_LAST,

    /* Ensure sizeof(enum) == sizeof(int) */
@@ -1753,6 +1761,12 @@ enum retro_mod
                                             * the frontend is attempting to call retro_run().
                                             */

+#define RETRO_ENVIRONMENT_GET_SAVESTATE_CONTEXT (72 | RETRO_ENVIRONMENT_EXPERIMENTAL)
+                                           /* int * --
+                                            * Tells the core about the context the frontend is asking for savestate.
+                                            * (see enum retro_savestate_context)
+                                            */
+
 /* VFS functionality */

 /* File paths:
@@ -2990,6 +3004,35 @@ enum retro_pixel_format
    RETRO_PIXEL_FORMAT_UNKNOWN = INT_MAX
 };

+enum retro_savestate_context
+{
+   /* Standard savestate written to disk. */
+   RETRO_SAVESTATE_CONTEXT_NORMAL = 0,
+
+   /* Savestate where you are guaranteed that the same instance will load the save state.
+    * You can store internal pointers to code or data.
+    * It's still a full serialization and deserialization, and could be loaded or saved at any time.
+    * It won't be written to disk or sent over the network.
+    */
+   RETRO_SAVESTATE_CONTEXT_RUNAHEAD_SAME_INSTANCE = 1,
+
+   /* Savestate where you are guaranteed that the same emulator binary will load that savestate.
+    * You can skip anything that would slow down saving or loading state but you can not store internal pointers.
+    * It won't be written to disk or sent over the network.
+    * Example: "Second Instance" runahead
+    */
+   RETRO_SAVESTATE_CONTEXT_RUNAHEAD_SAME_BINARY = 2,
+
+   /* Savestate used within a rollback netplay feature.
+    * You should skip anything that would unnecessarily increase bandwidth usage.
+    * It won't be written to disk but it will be sent over the network.
+    */
+   RETRO_SAVESTATE_CONTEXT_ROLLBACK_NETPLAY = 3,
+
+   /* Ensure sizeof() == sizeof(int). */
+   RETRO_SAVESTATE_CONTEXT_UNKNOWN = INT_MAX
+};
+
 struct retro_message
 {
    const char *msg;             /* Message to be displayed. */
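For quick reading, the four contexts summarised in the same language as the translation scripts; values and semantics are copied from the header comments above, so this is only a reading aid, not part of the libretro API:

    SAVESTATE_CONTEXTS = {
        0: ('NORMAL', 'written to disk'),
        1: ('RUNAHEAD_SAME_INSTANCE', 'same instance reloads it; internal pointers allowed; never written to disk or sent over the network'),
        2: ('RUNAHEAD_SAME_BINARY', 'same binary reloads it; slow work may be skipped, but no internal pointers; never written to disk or sent'),
        3: ('ROLLBACK_NETPLAY', 'sent over the network, never written to disk; keep bandwidth usage low'),
    }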
@@ -3461,6 +3504,10 @@ struct retro_core_option_definition
    const char *default_value;
 };

+#ifdef __PS3__
+#undef local
+#endif
+
 struct retro_core_options_intl
 {
    /* Pointer to an array of retro_core_option_definition structs

File diff suppressed because it is too large