Bug 1786490 - reformat the tree using black 23.3.0 r=sylvestre,webdriver-reviewers,taskgraph-reviewers,whimboo,jmaher,ahal,perftest-reviewers,geckoview-reviewers,afinder,m_kato

# ignore-this-changeset

Differential Revision: https://phabricator.services.mozilla.com/D186092
Logan Rosen 2023-09-06 16:14:30 +00:00
parent 51f5fcdd75
commit f024d94b2b
290 changed files with 777 additions and 840 deletions
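
The hunks below follow a few mechanical patterns. As an illustration only (not part of the commit), here is a minimal sketch of the style black 23.3.0 produces, with the pre-black form noted in each trailing comment; the names in the sketch are invented for the example:

    # Illustrative sketch: post-black style, with the old form noted in comments.
    mask = 2**32 - 1  # was: 2 ** 32 - 1 (no spaces around ** between simple operands)
    name = "\u2325"  # was: u"\u2325" (redundant u string prefix removed)
    pattern = rb"^\s*basepath"  # was: br"^\s*basepath" (string prefix normalized)

    mapping = {"a": 1, "b": 2}
    for key, value in mapping.items():  # was: for (key, value) in mapping.items():
        print(key, value)

    try:
        import psutil  # any module works here; psutil is just an example
    except ImportError:  # was: except (ImportError): (redundant parentheses removed)
        psutil = None

Many of the remaining hunks only add or remove blank lines around comments and definitions, per black 23's revised empty-line rules, which is why the diff deletes more lines (840) than it adds (777).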


@ -14,7 +14,6 @@ if sys.version_info[0] < 3:
class MetaPathFinder(object):
pass
else:
from importlib.abc import MetaPathFinder


@ -58,6 +58,7 @@ set_config("MOZ_REPLACE_MALLOC_STATIC", replace_malloc_static)
# PHC (Probabilistic Heap Checker)
# ==============================================================
# In general, it only makes sense for PHC to run on the platforms that have a
# crash reporter.
@depends(


@ -83,6 +83,7 @@ def pkgconf_requires_private(system_nspr, nspr_minver):
set_config("PKGCONF_REQUIRES_PRIVATE", pkgconf_requires_private)
# pkg_check_modules takes care of NSPR_CFLAGS and NSPR_LIBS when using --with-system-nspr.
@depends(build_environment, c_compiler, fold_libs, when=build_nspr)
def nspr_config(build_env, c_compiler, fold_libs):


@ -1033,6 +1033,7 @@ def sysroot(host_or_target, target_sysroot=None):
opt = "--with-host-sysroot"
env = "HOST_SYSROOT"
when = depends(host)(lambda h: h.kernel == "Linux")
# Only bootstrap a host sysroot when using a bootstrapped target sysroot
# or when the target doesn't use a bootstrapped sysroot in the first place.
@depends(when, bootstrap_target_when, target_sysroot.bootstrapped)
@ -1731,7 +1732,6 @@ def select_linker_tmpl(host_or_target):
@imports("os")
@imports("shutil")
def select_linker(linker, c_compiler, developer_options, toolchain_flags, target):
if linker:
linker = linker[0]
else:
@ -2500,6 +2500,7 @@ def cet_ldflags(c_compiler, target):
set_config("MOZ_CETCOMPAT_LDFLAGS", cet_ldflags)
# Frame pointers
# ==============================================================
@depends(c_compiler)
@ -2785,6 +2786,7 @@ add_old_configure_assignment(
"ENABLE_MOZSEARCH_PLUGIN", depends_if("--enable-mozsearch-plugin")(lambda _: True)
)
# Libstdc++ compatibility hacks
# ==============================================================
#
@ -2966,6 +2968,7 @@ add_old_configure_assignment("LIBFUZZER_FLAGS", libfuzzer_flags.use_flags)
# Shared library building
# ==============================================================
# XXX: The use of makefile constructs in these variables is awful.
@depends(target, c_compiler)
def make_shared_library(target, compiler):


@ -4,6 +4,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Updater
# ==============================================================
@depends(build_project)


@ -60,7 +60,6 @@ def scan_directory(path):
sys.exit(1)
for file in path.rglob("*"):
if not file.is_file():
continue


@ -208,7 +208,7 @@ def main():
if f in ignored_files and (f, 2) in functions:
fail(f"There should be only one {f} file")
for (filename, n) in sorted(functions):
for filename, n in sorted(functions):
for fn in functions[(filename, n)]:
# An allocation is present in a non-special file. Fail!
fail("'" + fn + "' present in " + filename)


@ -219,7 +219,7 @@ class _MockOpen(_MockBaseOpen):
content = six.ensure_binary(content or b"")
return MockedBytesFile(self, name, content)
else:
content = six.ensure_text(content or u"")
content = six.ensure_text(content or "")
return MockedStringFile(self, name, content)


@ -146,9 +146,9 @@ class TestNsinstall(unittest.TestCase):
@unittest.skipIf(not RUN_NON_ASCII_TESTS, "Skipping non ascii tests")
def test_nsinstall_non_ascii(self):
"Test that nsinstall handles non-ASCII files"
filename = u"\u2325\u3452\u2415\u5081"
filename = "\u2325\u3452\u2415\u5081"
testfile = self.touch(filename)
testdir = self.mkdirs(u"\u4241\u1D04\u1414")
testdir = self.mkdirs("\u4241\u1D04\u1414")
self.assertEqual(
nsinstall([testfile.encode("utf-8"), testdir.encode("utf-8")]), 0
)
@ -162,9 +162,9 @@ class TestNsinstall(unittest.TestCase):
)
def test_nsinstall_non_ascii_subprocess(self):
"Test that nsinstall as a subprocess handles non-ASCII files"
filename = u"\u2325\u3452\u2415\u5081"
filename = "\u2325\u3452\u2415\u5081"
testfile = self.touch(filename)
testdir = self.mkdirs(u"\u4241\u1D04\u1414")
testdir = self.mkdirs("\u4241\u1D04\u1414")
# We don't use subprocess because it can't handle Unicode on
# Windows <http://bugs.python.org/issue1759845>. mozprocess calls
# CreateProcessW directly so it's perfect.


@ -4,6 +4,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Put the content of `filenames[0]` file into `output` file pointer
def main(output, *filenames):
with open(filenames[0], "r", encoding="utf-8") as f:


@ -381,7 +381,7 @@ def fill(template, **args):
t, argModList = compile_fill_template(template)
# Now apply argModList to args
for (name, modified_name, depth) in argModList:
for name, modified_name, depth in argModList:
if not (args[name] == "" or args[name].endswith("\n")):
raise ValueError(
"Argument %s with value %r is missing a newline" % (name, args[name])
@ -4372,7 +4372,7 @@ def InitUnforgeablePropertiesOnHolder(
(defineUnforgeableAttrs, properties.unforgeableAttrs),
(defineUnforgeableMethods, properties.unforgeableMethods),
]
for (template, array) in unforgeableMembers:
for template, array in unforgeableMembers:
if array.hasNonChromeOnly():
unforgeables.append(CGGeneric(template % array.variableName(False)))
if array.hasChromeOnly():
@ -13021,7 +13021,6 @@ class CGUnionStruct(CGThing):
return self.type.getDeps()
def getStruct(self):
members = [
ClassMember("mType", "TypeOrUninit", body="eUninitialized"),
ClassMember("mValue", "Value"),
@ -17065,7 +17064,6 @@ class CGDescriptor(CGThing):
class CGNamespacedEnum(CGThing):
def __init__(self, namespace, enumName, names, values, comment=""):
if not values:
values = []
@ -23425,7 +23423,6 @@ class GlobalGenRoots:
@staticmethod
def PrototypeList(config):
# Prototype ID enum.
descriptorsWithPrototype = config.getDescriptors(
hasInterfacePrototypeObject=True
@ -23613,7 +23610,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterBindings(config):
curr = CGNamespace.build(
["mozilla", "dom"], CGGlobalNames(config.windowGlobalNames)
)
@ -23641,7 +23637,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterWorkerBindings(config):
curr = CGRegisterWorkerBindings(config)
# Wrap all of that in our namespaces.
@ -23668,7 +23663,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterWorkerDebuggerBindings(config):
curr = CGRegisterWorkerDebuggerBindings(config)
# Wrap all of that in our namespaces.
@ -23695,7 +23689,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterWorkletBindings(config):
curr = CGRegisterWorkletBindings(config)
# Wrap all of that in our namespaces.
@ -23722,7 +23715,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterShadowRealmBindings(config):
curr = CGRegisterShadowRealmBindings(config)
# Wrap all of that in our namespaces.


@ -248,7 +248,7 @@ class Configuration(DescriptorProvider):
for m in t.flatMemberTypes:
addUnions(m)
for (t, _) in getAllTypes(self.descriptors, self.dictionaries, self.callbacks):
for t, _ in getAllTypes(self.descriptors, self.dictionaries, self.callbacks):
addUnions(t)
for d in getDictionariesConvertedToJS(
@ -440,7 +440,7 @@ class Configuration(DescriptorProvider):
name,
)
)
for (k, v) in firstExtAttrs.items():
for k, v in firstExtAttrs.items():
if extAttrs[k] != v:
raise TypeError(
"%s on %s and %s on %s have different values for extended attribute %s, but they're using the same template %s."


@ -947,7 +947,6 @@ class IDLInterfaceMixin(IDLInterfaceOrInterfaceMixinOrNamespace):
def validate(self):
for member in self.members:
if member.isAttr():
if member.inherit:
raise WebIDLError(
@ -3165,7 +3164,7 @@ class IDLUnionType(IDLType):
return "MaybeShared" + type.name
return type.name
for (i, type) in enumerate(self.memberTypes):
for i, type in enumerate(self.memberTypes):
if not type.isComplete():
self.memberTypes[i] = type.complete(scope)
@ -3206,7 +3205,7 @@ class IDLUnionType(IDLType):
continue
i += 1
for (i, t) in enumerate(self.flatMemberTypes[:-1]):
for i, t in enumerate(self.flatMemberTypes[:-1]):
for u in self.flatMemberTypes[i + 1 :]:
if not t.isDistinguishableFrom(u):
raise WebIDLError(
@ -3619,7 +3618,6 @@ class IDLPromiseType(IDLParametrizedType):
class IDLBuiltinType(IDLType):
Types = enum(
# The integer types
"byte",
@ -4287,7 +4285,7 @@ class IDLValue(IDLObject):
)
elif self.type.isInteger() and type.isFloat():
# Convert an integer literal into float
if -(2 ** 24) <= self.value <= 2 ** 24:
if -(2**24) <= self.value <= 2**24:
return IDLValue(self.location, type, float(self.value))
else:
raise WebIDLError(
@ -4468,7 +4466,6 @@ class IDLUndefinedValue(IDLObject):
class IDLInterfaceMember(IDLObjectWithIdentifier, IDLExposureMixins):
Tags = enum(
"Const", "Attr", "Method", "MaplikeOrSetlike", "AsyncIterable", "Iterable"
)
@ -5791,7 +5788,7 @@ class IDLAttribute(IDLInterfaceMember):
"CrossOriginWritable",
"SetterThrows",
]
for (key, value) in self._extendedAttrDict.items():
for key, value in self._extendedAttrDict.items():
if key in allowedExtAttrs:
if value is not True:
raise WebIDLError(
@ -5968,7 +5965,7 @@ class IDLCallback(IDLObjectWithScope):
IDLObjectWithScope.__init__(self, location, parentScope, identifier)
for (returnType, arguments) in self.signatures():
for returnType, arguments in self.signatures():
for argument in arguments:
argument.resolve(self)
@ -6109,7 +6106,6 @@ class IDLMethodOverload:
class IDLMethod(IDLInterfaceMember, IDLScope):
Special = enum(
"Getter", "Setter", "Deleter", "LegacyCaller", base=IDLInterfaceMember.Special
)
@ -6292,7 +6288,7 @@ class IDLMethod(IDLInterfaceMember, IDLScope):
assert isinstance(parentScope, IDLScope)
IDLObjectWithIdentifier.resolve(self, parentScope)
IDLScope.__init__(self, self.location, parentScope, self.identifier)
for (returnType, arguments) in self.signatures():
for returnType, arguments in self.signatures():
for argument in arguments:
argument.resolve(self)
@ -6435,7 +6431,7 @@ class IDLMethod(IDLInterfaceMember, IDLScope):
variadicArgument = None
arguments = overload.arguments
for (idx, argument) in enumerate(arguments):
for idx, argument in enumerate(arguments):
assert argument.type.isComplete()
if (
@ -6568,8 +6564,8 @@ class IDLMethod(IDLInterfaceMember, IDLScope):
def distinguishingIndexForArgCount(self, argc):
def isValidDistinguishingIndex(idx, signatures):
for (firstSigIndex, (firstRetval, firstArgs)) in enumerate(signatures[:-1]):
for (secondRetval, secondArgs) in signatures[firstSigIndex + 1 :]:
for firstSigIndex, (firstRetval, firstArgs) in enumerate(signatures[:-1]):
for secondRetval, secondArgs in signatures[firstSigIndex + 1 :]:
if idx < len(firstArgs):
firstType = firstArgs[idx].type
else:


@ -337,7 +337,7 @@ def WebIDLTest(parser, harness):
),
]
for (name, template) in TEMPLATES:
for name, template in TEMPLATES:
parser = parser.reset()
threw = False
try:
@ -346,7 +346,7 @@ def WebIDLTest(parser, harness):
except Exception:
threw = True
harness.ok(not threw, "Template for %s parses without attributes" % name)
for (attribute, type) in ATTRIBUTES:
for attribute, type in ATTRIBUTES:
parser = parser.reset()
threw = False
try:


@ -94,7 +94,7 @@ def WebIDLTest(parser, harness):
"""
)
results = parser.finish()
for (i, iface) in enumerate(results):
for i, iface in enumerate(results):
harness.check(
iface.isSingleOperationInterface(),
i < 4,


@ -64,7 +64,7 @@ def WebIDLTest(parser, harness):
len(iface.members), len(expected), "Expect %s members" % len(expected)
)
for (const, (QName, name, type, value)) in zip(iface.members, expected):
for const, (QName, name, type, value) in zip(iface.members, expected):
harness.ok(isinstance(const, WebIDL.IDLConst), "Should be an IDLConst")
harness.ok(const.isConst(), "Const is a const")
harness.ok(not const.isAttr(), "Const is not an attr")


@ -87,7 +87,7 @@ def WebIDLTest(parser, harness):
)
sigpairs = zip(method.signatures(), signatures)
for (gotSignature, expectedSignature) in sigpairs:
for gotSignature, expectedSignature in sigpairs:
(gotRetType, gotArgs) = gotSignature
(expectedRetType, expectedArgs) = expectedSignature


@ -55,7 +55,7 @@ def WebIDLTest(parser, harness):
method = iface.members[6]
harness.ok(isinstance(method, WebIDL.IDLMethod), "Should be an IDLMethod")
argtypes = [a.type for a in method.signatures()[0][1]]
for (idx, type) in enumerate(argtypes):
for idx, type in enumerate(argtypes):
harness.ok(type.isFloat(), "Type %d should be float" % idx)
harness.check(
type.isUnrestricted(),


@ -90,7 +90,7 @@ def WebIDLTest(parser, harness):
)
sigpairs = zip(method.signatures(), signatures)
for (gotSignature, expectedSignature) in sigpairs:
for gotSignature, expectedSignature in sigpairs:
(gotRetType, gotArgs) = gotSignature
(expectedRetType, expectedArgs) = expectedSignature


@ -4,7 +4,6 @@
def WebIDLTest(parser, harness):
# Test dictionary as inner type
harness.should_throw(
parser,


@ -1,5 +1,4 @@
def WebIDLTest(parser, harness):
parser.parse(
"""
interface Foo {};


@ -89,7 +89,7 @@ def WebIDLTest(parser, harness):
interface PrepareForTest {
"""
)
for (i, type) in enumerate(types):
for i, type in enumerate(types):
interface += string.Template(
"""
readonly attribute ${type} attr${i};
@ -155,7 +155,7 @@ def WebIDLTest(parser, harness):
interface TestUnion {
"""
)
for (i, type) in enumerate(validUnionTypes):
for i, type in enumerate(validUnionTypes):
interface += string.Template(
"""
undefined method${i}(${type} arg);


@ -140,7 +140,6 @@ class CachesDeleteCleanupAtShutdownTestCase(MarionetteTestCase):
beforeUsage = self.getUsage()
def ensureCleanCallback():
Wait(self.marionette, timeout=60).until(
lambda x: (self.getUsage() - beforeUsage)
< EXPECTED_CACHEDIR_SIZE_AFTER_CLEANUP,


@ -510,7 +510,7 @@ def LoadErrata():
ret = {}
for (sectionName, (sectionLineNum, sectionMap)) in iniMap.items():
for sectionName, (sectionLineNum, sectionMap) in iniMap.items():
curLines = []
if sectionName is None:
@ -521,7 +521,7 @@ def LoadErrata():
sectionLineNum, sectionName
)
for (key, (lineNum, val)) in sectionMap.items():
for key, (lineNum, val) in sectionMap.items():
assert key in ACCEPTABLE_ERRATA_KEYS, "Line {}: {}".format(lineNum, key)
curLine = "{} = {}".format(key, val)


@ -7,7 +7,6 @@
include("/ipc/chromium/chromium-config.mozbuild")
if CONFIG["OS_TARGET"] != "WINNT":
if CONFIG["OS_TARGET"] != "Android":
SOURCES += [
"ice_unittest.cpp",


@ -81,7 +81,7 @@ def web_socket_transfer_data(request):
msgutil.send_message(request, six.ensure_text(resp))
elif request.ws_protocol == "test-12":
msg = msgutil.receive_message(request)
if msg == u"a\ufffdb":
if msg == "a\ufffdb":
# converted unpaired surrogate in UTF-16 to UTF-8 OK
msgutil.send_message(request, "SUCCESS")
else:


@ -485,7 +485,7 @@ def export_target(target_full_name) -> Set[str]:
append_arr(lines, "LOCAL_INCLUDES", fixup_paths(desc["include_dirs"]))
append_arr_commented(lines, "CXXFLAGS", cxxflags)
for (config, v) in sorted_items(sources_by_config):
for config, v in sorted_items(sources_by_config):
indent = 0
if config:
lines.append("if {}:".format(config))
@ -506,7 +506,7 @@ def export_target(target_full_name) -> Set[str]:
append_arr(lines, "OS_LIBS", os_libs)
append_arr_commented(lines, "LDFLAGS", ldflags)
for (k, v) in sorted(extras.items()):
for k, v in sorted(extras.items()):
lines.append("{} = {}".format(k, v))
lib_type = desc["type"]


@ -35,7 +35,7 @@ f.close
offsets = []
length = 10 + 11 * len(vsdict)
for (k, mappings) in sorted(vsdict.items()):
for k, mappings in sorted(vsdict.items()):
offsets.append(length)
length += 4 + 5 * len(mappings)
@ -69,10 +69,10 @@ for i, k in enumerate(sorted(vsdict.keys())):
" U24(0x%04X), U32(0), U32(%d), // varSelectorRecord[%d]\n"
% (k, offsets[i], i)
)
for (k, mappings) in sorted(vsdict.items()):
for k, mappings in sorted(vsdict.items()):
f.write(" // 0x%04X\n" % k)
f.write(" U32(%d), // numUVSMappings\n" % len(mappings))
for (unified, compat) in sorted(mappings.items()):
for unified, compat in sorted(mappings.items()):
f.write(" U24(0x%04X), GLYPH(0x%04X),\n" % (unified, compat))
f.write(
"""};


@ -46,7 +46,6 @@ if CONFIG["MOZ_WIDGET_TOOLKIT"] == "android":
"android/AndroidHal.cpp",
]
elif CONFIG["OS_TARGET"] == "Linux":
UNIFIED_SOURCES += [
"fallback/FallbackScreenConfiguration.cpp",
"fallback/FallbackSensor.cpp",


@ -234,7 +234,7 @@ enum IPCMessages {
)
for protocol in sorted(allmessages.keys()):
for (msg, num) in allmessages[protocol].idnums:
for msg, num in allmessages[protocol].idnums:
if num:
print(" %s = %s," % (msg, num), file=ipc_msgtype_name)
elif not msg.endswith("End"):
@ -256,7 +256,7 @@ const char* StringFromIPCMessageType(uint32_t aMessageType)
)
for protocol in sorted(allmessages.keys()):
for (msg, num) in allmessages[protocol].idnums:
for msg, num in allmessages[protocol].idnums:
if num or msg.endswith("End"):
continue
print(


@ -64,9 +64,9 @@ class LowerToCxx:
def hashfunc(value):
h = hash_str(value) % 2 ** 32
h = hash_str(value) % 2**32
if h < 0:
h += 2 ** 32
h += 2**32
return h
@ -2275,7 +2275,7 @@ class _ParamTraits:
# After non-pod data, bulk read/write pod data in member order. This has
# to be done after the result has been constructed, so that we have
# somewhere to read into.
for (size, fields) in itertools.groupby(
for size, fields in itertools.groupby(
sd.fields_member_order(), lambda f: pod_size(f.ipdltype)
):
if size != pod_size_sentinel:
@ -2517,7 +2517,7 @@ class _ComputeTypeDeps(TypeVisitor):
def _fieldStaticAssertions(sd):
staticasserts = []
for (size, fields) in itertools.groupby(
for size, fields in itertools.groupby(
sd.fields_member_order(), lambda f: pod_size(f.ipdltype)
):
if size == pod_size_sentinel:


@ -1721,7 +1721,6 @@ class CheckTypes(TcheckVisitor):
)
if mtype.compress and (not mtype.isAsync() or mtype.isCtor() or mtype.isDtor()):
if mtype.isCtor() or mtype.isDtor():
message_type = "constructor" if mtype.isCtor() else "destructor"
error_message = (


@ -20,11 +20,11 @@ from subprocess import check_call
topsrcdir, chromiumtree, rev = sys.argv[1:]
if not os.path.exists(os.path.join(topsrcdir, "client.py")):
print >>sys.stderr, "Incorrect topsrcdir"
print >> sys.stderr, "Incorrect topsrcdir"
sys.exit(1)
if not os.path.exists(os.path.join(chromiumtree, "src/DEPS")):
print >>sys.stderr, "Incorrect chromium directory, missing DEPS"
print >> sys.stderr, "Incorrect chromium directory, missing DEPS"
sys.exit(1)
check_call(["gclient", "sync", "--force", "--revision=src@%s" % rev], cwd=chromiumtree)


@ -26,6 +26,7 @@ building_ffi = depends(system_ffi)(lambda v: v is None)
set_config("MOZ_SYSTEM_FFI", depends_if(system_ffi)(lambda _: True))
# Target selection, based on ffi/configure.ac.
@depends(target, when=building_ffi)
def ffi_target(target):


@ -173,6 +173,7 @@ def enable_decorators(value):
set_config("ENABLE_DECORATORS", enable_decorators)
set_define("ENABLE_DECORATORS", enable_decorators)
# JIT support
# =======================================================
@depends(target, "--enable-record-tuple")
@ -506,6 +507,7 @@ set_config("JS_MASM_VERBOSE", depends_if("--enable-masm-verbose")(lambda _: True
# Architecture feature flags
# =======================================================
# Apple silicon does not seem to have any way to query the OS for the JSCVT
# flag stored in the ID_AA64ISAR1_EL1 system register. In the mean time, we
# hard code the value of the JSCVT flag which guards the implementation of
@ -521,6 +523,7 @@ option(
help="{Enable|Disable} static use of FJCVTZS instruction on Aarch64 targets.",
)
# The "ARM Architecture Reference Manual" for ARMv8 defines the JSCVT flag as
# being a 4 bit integer (D12.2.52) and it can be manipulated using >= operator
# (D12.1.4).
@ -549,6 +552,7 @@ def has_pthread_jit_write_protect_np(target):
# JIT code write protection.
set_define("JS_USE_APPLE_FAST_WX", True, when=has_pthread_jit_write_protect_np)
# CTypes
# =======================================================
@depends(js_standalone)


@ -99,7 +99,7 @@ def writeMappingsVar(println, mapping, name, description, source, url):
println("")
writeMappingHeader(println, description, source, url)
println("var {0} = {{".format(name))
for (key, value) in sorted(mapping.items(), key=itemgetter(0)):
for key, value in sorted(mapping.items(), key=itemgetter(0)):
println(' "{0}": "{1}",'.format(key, value))
println("};")
@ -173,7 +173,7 @@ def writeMappingsBinarySearchBody(
# for the binary search, which only performs a single |memcmp| for multiple
# of two subtag lengths.
mappings_keys = mappings.keys() if type(mappings) == dict else mappings
for (length, subtags) in groupby(sorted(mappings_keys, key=len), len):
for length, subtags in groupby(sorted(mappings_keys, key=len), len):
# Omit the length check if the current length is the maximum length.
if length != tag_maxlength:
println(
@ -318,7 +318,7 @@ void mozilla::intl::Locale::PerformComplexLanguageMappings() {
# Merge duplicate language entries.
language_aliases = {}
for (deprecated_language, (language, script, region)) in sorted(
for deprecated_language, (language, script, region) in sorted(
complex_language_mappings.items(), key=itemgetter(0)
):
key = (language, script, region)
@ -328,7 +328,7 @@ void mozilla::intl::Locale::PerformComplexLanguageMappings() {
language_aliases[key].append(deprecated_language)
first_language = True
for (deprecated_language, (language, script, region)) in sorted(
for deprecated_language, (language, script, region) in sorted(
complex_language_mappings.items(), key=itemgetter(0)
):
key = (language, script, region)
@ -422,7 +422,7 @@ void mozilla::intl::Locale::PerformComplexRegionMappings() {
# Merge duplicate region entries.
region_aliases = {}
for (deprecated_region, (default, non_default_replacements)) in sorted(
for deprecated_region, (default, non_default_replacements) in sorted(
complex_region_mappings.items(), key=itemgetter(0)
):
key = hash_key(default, non_default_replacements)
@ -432,7 +432,7 @@ void mozilla::intl::Locale::PerformComplexRegionMappings() {
region_aliases[key].append(deprecated_region)
first_region = True
for (deprecated_region, (default, non_default_replacements)) in sorted(
for deprecated_region, (default, non_default_replacements) in sorted(
complex_region_mappings.items(), key=itemgetter(0)
):
key = hash_key(default, non_default_replacements)
@ -587,7 +587,7 @@ bool mozilla::intl::Locale::PerformVariantMappings() {
)
)
for (deprecated_variant, (type, replacement)) in sorted(
for deprecated_variant, (type, replacement) in sorted(
with_alias, key=itemgetter(0)
):
println(
@ -730,7 +730,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
# Group the mappings by language.
legacy_mappings_by_language = {}
for (type, replacement) in legacy_mappings.items():
for type, replacement in legacy_mappings.items():
(language, _, _, _) = type
legacy_mappings_by_language.setdefault(language, {})[type] = replacement
@ -820,7 +820,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
def hash_key(mappings):
return str(sorted(mappings.items(), key=itemgetter(0)))
for (lang, mappings) in sorted(
for lang, mappings in sorted(
legacy_mappings_by_language.items(), key=itemgetter(0)
):
key = hash_key(mappings)
@ -848,10 +848,9 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
return len(k.split("-"))
# Alias rules are applied by largest union size first.
for (size, mappings_by_size) in groupby(
for size, mappings_by_size in groupby(
sorted(mappings.items(), key=variant_size, reverse=True), key=variant_size
):
# Convert grouper object to dict.
mappings_by_size = dict(mappings_by_size)
@ -859,7 +858,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
chain_if = size == 1
# Alias rules are applied in alphabetical order
for (variants, r_language) in sorted(
for variants, r_language in sorted(
mappings_by_size.items(), key=itemgetter(0)
):
sorted_variants = sorted(variants.split("-"))
@ -868,7 +867,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
maybe_else = "else " if chain_if and not is_first else ""
is_first = False
for (i, variant) in enumerate(sorted_variants):
for i, variant in enumerate(sorted_variants):
println(
f"""
{" " * i}{maybe_else}if (auto* {variant} = findVariant("{variant}")) {{
@ -1106,11 +1105,11 @@ def readSupplementalData(core_file):
# Compute the transitive closure.
# Any case which currently doesn't occur in the CLDR sources isn't supported
# and will lead to throwing an error.
for (type, replacement) in rules.items():
for type, replacement in rules.items():
(language, script, region, variants) = type
(r_language, r_script, r_region, r_variants) = replacement
for (i_type, i_replacement) in rules.items():
for i_type, i_replacement in rules.items():
(i_language, i_script, i_region, i_variants) = i_type
(i_r_language, i_r_script, i_r_region, i_r_variants) = i_replacement
@ -1257,7 +1256,7 @@ def readSupplementalData(core_file):
variant_mappings = {}
# Preprocess all rules so we can perform a single lookup per subtag at runtime.
for (type, replacement) in rules.items():
for type, replacement in rules.items():
(language, script, region, variants) = type
(r_language, r_script, r_region, r_variants) = replacement
@ -1399,7 +1398,7 @@ def readSupplementalData(core_file):
complex_region_mappings_final = {}
for (deprecated_region, replacements) in complex_region_mappings.items():
for deprecated_region, replacements in complex_region_mappings.items():
# Find all likely subtag entries which don't already contain a region
# subtag and whose target region is in the list of replacement regions.
region_likely_subtags = [
@ -2507,7 +2506,7 @@ def readICULegacyZones(icuDir):
# A handful of non-IANA zones/links are not in icuzones and must be added
# manually so that we won't invoke ICU with them.
for (zone, target) in otherICULegacyLinks().items():
for zone, target in otherICULegacyLinks().items():
if zone in links:
if links[zone] != target:
raise KeyError(
@ -2712,7 +2711,7 @@ def processTimeZones(
println("// Format:")
println('// "ZoneName" // ICU-Name [time zone file]')
println("const char* const ianaZonesTreatedAsLinksByICU[] = {")
for (zone, icuZone) in incorrectZones:
for zone, icuZone in incorrectZones:
println(' "%s", // %s [%s]' % (zone, icuZone, zone.filename))
println("};")
println("")
@ -2726,7 +2725,7 @@ def processTimeZones(
println("};")
println("")
println("const LinkAndTarget ianaLinksCanonicalizedDifferentlyByICU[] = {")
for (zone, target, icuTarget) in incorrectLinks:
for zone, target, icuTarget in incorrectLinks:
println(
' { "%s", "%s" }, // %s [%s]'
% (zone, target, icuTarget, zone.filename)
@ -2796,7 +2795,7 @@ const tzMapper = [
println(description)
println("const links = {")
for (zone, target) in sorted(links, key=itemgetter(0)):
for zone, target in sorted(links, key=itemgetter(0)):
println(' "%s": "%s",' % (zone, target))
println("};")
@ -3150,10 +3149,10 @@ def writeCurrencyFile(published, currencies, out):
*/"""
)
println("var currencyDigits = {")
for (currency, entries) in groupby(
for currency, entries in groupby(
sorted(currencies, key=itemgetter(0)), itemgetter(0)
):
for (_, minorUnits, currencyName, countryName) in entries:
for _, minorUnits, currencyName, countryName in entries:
println(" // {} ({})".format(currencyName, countryName))
println(" {}: {},".format(currency, minorUnits))
println("};")
@ -3319,7 +3318,7 @@ const char* mozilla::intl::Locale::Replace{0}ExtensionType(
# Merge duplicate keys.
key_aliases = {}
for (key, replacements) in sorted(mapping.items(), key=itemgetter(0)):
for key, replacements in sorted(mapping.items(), key=itemgetter(0)):
hash_key = to_hash_key(replacements)
if hash_key not in key_aliases:
key_aliases[hash_key] = []
@ -3327,7 +3326,7 @@ const char* mozilla::intl::Locale::Replace{0}ExtensionType(
key_aliases[hash_key].append(key)
first_key = True
for (key, replacements) in sorted(mapping.items(), key=itemgetter(0)):
for key, replacements in sorted(mapping.items(), key=itemgetter(0)):
hash_key = to_hash_key(replacements)
if key in key_aliases[hash_key]:
continue
@ -3368,7 +3367,7 @@ const char* mozilla::intl::Locale::Replace{0}ExtensionType(
)
)
else:
for (type, replacement) in replacements:
for type, replacement in replacements:
println(
"""
if (Is{}Type(type, "{}")) {{


@ -166,7 +166,7 @@ if __name__ == "__main__":
test_list = find_tests(test_dir)
if not test_list:
print >>sys.stderr, "No tests found matching command line arguments."
print >> sys.stderr, "No tests found matching command line arguments."
sys.exit(0)
test_list = [Test.from_file(tst, name, OPTIONS) for tst, name in test_list]
@ -178,7 +178,7 @@ if __name__ == "__main__":
except OSError:
if not os.path.exists(JS):
print >>sys.stderr, "JS shell argument: file does not exist: '%s'" % JS
print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % JS
sys.exit(1)
else:
raise


@ -212,7 +212,7 @@ JOBS = {
# - item is command[j]
def out_indexes(command):
i = 0
for (j, fragment) in enumerate(command):
for j, fragment in enumerate(command):
if isinstance(fragment, Output):
yield (i, j, fragment)
i += 1
@ -221,7 +221,7 @@ def out_indexes(command):
def job_command_with_final_output_names(job):
outfiles = job.get("outputs", [])
command = list(job["command"])
for (i, j, name) in out_indexes(job["command"]):
for i, j, name in out_indexes(job["command"]):
command[j] = outfiles[i]
return command
@ -256,7 +256,7 @@ def run_job(name, config):
info["redirect"].close()
# Rename the temporary files to their final names.
for (temp, final) in info["rename_map"].items():
for temp, final in info["rename_map"].items():
try:
if config["verbose"] > 1:
print("Renaming %s -> %s" % (temp, final))
@ -285,7 +285,7 @@ def spawn_command(cmdspec, job, name, config):
# from those temp names to their actual final names that will be used
# if the command succeeds.
command = list(cmdspec)
for (i, j, raw_name) in out_indexes(cmdspec):
for i, j, raw_name in out_indexes(cmdspec):
[name] = fill([raw_name], config)
command[j] = "{}.tmp{}".format(name, config.get("i", ""))
rename_map[command[j]] = outfiles[i]
@ -305,7 +305,7 @@ def spawn_command(cmdspec, job, name, config):
# Default to conservatively assuming 4GB/job.
def max_parallel_jobs(job_size=4 * 2 ** 30):
def max_parallel_jobs(job_size=4 * 2**30):
"""Return the max number of parallel jobs we can run without overfilling
memory, assuming heavyweight jobs."""
from_cores = int(subprocess.check_output(["nproc", "--ignore=1"]).strip())
@ -434,7 +434,7 @@ for step in steps:
elif "outputs" in job and "command" in job:
outfiles = job["outputs"]
num_outputs = 0
for (i, j, name) in out_indexes(job["command"]):
for i, j, name in out_indexes(job["command"]):
# Trim the {curly brackets} off of the output keys.
data[name[1:-1]] = outfiles[i]
num_outputs += 1


@ -35,7 +35,7 @@ class Body(dict):
src, dst = edge["Index"]
self["SrcPoint2Edges"][src].append(edge)
self["Line2Edges"] = defaultdict(list)
for (src, edges) in self["SrcPoint2Edges"].items():
for src, edges in self["SrcPoint2Edges"].items():
line = self["Points"][src]
self["Line2Edges"][line].extend(edges)


@ -21,5 +21,5 @@ def get_header_length_and_flags(value, cache):
# If we couldn't fetch the length directly, it must be stored
# within `flags`.
length = flags >> 32
flags = flags % 2 ** 32
flags = flags % 2**32
return length, flags


@ -120,7 +120,7 @@ class JSValueTypeCache(object):
# the i'th magic value.
d = gdb.types.make_enum_dict(gdb.lookup_type("JSWhyMagic"))
self.magic_names = list(range(max(d.values()) + 1))
for (k, v) in d.items():
for k, v in d.items():
self.magic_names[v] = k
# Choose an unboxing scheme for this architecture.


@ -137,7 +137,7 @@ def clear_module_printers(module_name):
# should remove. (It's not safe to delete entries from a dictionary
# while we're iterating over it.)
to_delete = []
for (k, v) in d.items():
for k, v in d.items():
if v.__module__ == module_name:
to_delete.append(k)
remove_from_subprinter_list(v)
@ -250,7 +250,6 @@ class TypeCache(object):
def implemented_types(t):
# Yield all types that follow |t|.
def followers(t):
if t.code == gdb.TYPE_CODE_TYPEDEF:
@ -346,7 +345,7 @@ def lookup_for_objfile(objfile):
# to scan the whole list, so regexp printers should be used
# sparingly.
s = str(value.type)
for (r, f) in printers_by_regexp:
for r, f in printers_by_regexp:
if f.enabled:
m = r.match(s)
if m:


@ -114,7 +114,6 @@ class Summary(object):
self.bar.finish()
if self.failures:
print("tests failed:")
for test in self.failures:
test.show(sys.stdout)


@ -92,7 +92,6 @@ def extend_condition(condition, value):
class JitTest:
VALGRIND_CMD = []
paths = (d for d in os.environ["PATH"].split(os.pathsep))
valgrinds = (os.path.join(d, "valgrind") for d in paths)
@ -488,7 +487,7 @@ def check_output(out, err, rc, timed_out, test, options):
# Python 3 on Windows interprets process exit codes as unsigned
# integers, where Python 2 used to allow signed integers. Account for
# each possibility here.
if sys.platform == "win32" and rc in (3 - 2 ** 31, 3 + 2 ** 31):
if sys.platform == "win32" and rc in (3 - 2**31, 3 + 2**31):
return True
if sys.platform != "win32" and rc == -11:


@ -73,7 +73,7 @@ def init_device(options):
context = MozbuildObject.from_environment()
adb_path = get_adb_path(context)
except (ImportError):
except ImportError:
adb_path = "adb"
DEVICE = ADBDeviceFactory(


@ -80,7 +80,7 @@ class TestResult:
harness_message = "Exit code reported crash"
tests = []
else:
for (idx, line) in enumerate(stdout):
for idx, line in enumerate(stdout):
if line.startswith("WPT OUTPUT: "):
msg = line[len("WPT OUTPUT: ") :]
data = [output.test.wpt.url] + json.loads(msg)


@ -118,7 +118,7 @@ def read_input(tasks, timeout):
try:
readable, _, _ = select.select(rlist, [], exlist, timeout)
except OverflowError:
print >>sys.stderr, "timeout value", timeout
print >> sys.stderr, "timeout value", timeout
raise
for fd in readable:


@ -299,7 +299,7 @@ def insertMeta(source, frontmatter):
lines.append("/*---")
for (key, value) in frontmatter.items():
for key, value in frontmatter.items():
if key in ("description", "info"):
lines.append("%s: |" % key)
lines.append(
@ -336,7 +336,6 @@ def findAndCopyIncludes(dirPath, baseDir, includeDir):
# we reach the base directory of shell.js include files.
# Each directory will have a shell.js file to copy.
while relPath:
# find the shell.js
shellFile = os.path.join(baseDir, relPath, "shell.js")
@ -367,7 +366,6 @@ def findAndCopyIncludes(dirPath, baseDir, includeDir):
def exportTest262(args):
outDir = os.path.abspath(args.out)
providedSrcs = args.src
includeShell = args.exportshellincludes
@ -384,15 +382,13 @@ def exportTest262(args):
# Go through each source path
for providedSrc in providedSrcs:
src = os.path.abspath(providedSrc)
# the basename of the path will be used in case multiple "src" arguments
# are passed in to create an output directory for each "src".
basename = os.path.basename(src)
# Process all test directories recursively.
for (dirPath, _, fileNames) in os.walk(src):
for dirPath, _, fileNames in os.walk(src):
# we need to make and get the unique set of includes for this filepath
includes = []
if includeShell:


@ -511,7 +511,7 @@ def process_test262(test262Dir, test262OutDir, strictTests, externManifests):
explicitIncludes[os.path.join("built-ins", "Temporal")] = ["temporalHelpers.js"]
# Process all test directories recursively.
for (dirPath, dirNames, fileNames) in os.walk(testDir):
for dirPath, dirNames, fileNames in os.walk(testDir):
relPath = os.path.relpath(dirPath, testDir)
if relPath == ".":
continue
@ -554,7 +554,7 @@ def process_test262(test262Dir, test262OutDir, strictTests, externManifests):
test262parser, testSource, testName, includeSet, strictTests
)
for (newFileName, newSource, externRefTest) in convert:
for newFileName, newSource, externRefTest in convert:
writeTestFile(test262OutDir, newFileName, newSource)
if externRefTest is not None:


@ -202,7 +202,7 @@ def int_ranges(ints):
"""Yields consecutive ranges (inclusive) from integer values."""
(a, b) = tee(sorted(ints))
start = next(b)
for (curr, succ) in zip_longest(a, b):
for curr, succ in zip_longest(a, b):
if curr + 1 != succ:
yield (start, curr)
start = succ
@ -280,7 +280,7 @@ def process_derived_core_properties(derived_core_properties):
id_start = set()
id_continue = set()
for (char, prop) in read_derived_core_properties(derived_core_properties):
for char, prop in read_derived_core_properties(derived_core_properties):
if prop == "ID_Start":
id_start.add(char)
if prop == "ID_Continue":
@ -399,7 +399,7 @@ def process_case_folding(case_folding):
folding_tests = []
folding_codes = set()
for (code, mapping) in read_case_folding(case_folding):
for code, mapping in read_case_folding(case_folding):
folding_map[code] = mapping
if mapping not in rev_folding_map:
@ -466,9 +466,7 @@ def process_special_casing(special_casing, table, index):
(upper, lower, flags) = table[index[code]]
return ((code + lower) & 0xFFFF, (code + upper) & 0xFFFF)
for (code, lower, upper, languages, contexts) in read_special_casing(
special_casing
):
for code, lower, upper, languages, contexts in read_special_casing(special_casing):
assert code <= MAX_BMP, "Unexpected character outside of BMP: %s" % code
assert len(languages) <= 1, "Expected zero or one language ids: %s" % languages
assert len(contexts) <= 1, (
@ -686,7 +684,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
def describe_range(ranges, depth):
indent = depth * " "
for (start, end) in ranges:
for start, end in ranges:
if start == end:
println(indent, "// {}".format(codepoint_table.full_name(start)))
else:
@ -715,7 +713,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
def in_any_range(ranges, spaces):
"""Tests if the input character is included in any of the given ranges."""
lines = [[]]
for (start, end) in ranges:
for start, end in ranges:
expr = in_range(start, end, parenthesize=True)
line = " || ".join(lines[-1] + [expr])
if len(line) < (100 - len(spaces) - len(" ||")):
@ -836,9 +834,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
println("{")
println(" switch(ch) {")
for (code, converted) in sorted(
unconditional_toupper.items(), key=itemgetter(0)
):
for code, converted in sorted(unconditional_toupper.items(), key=itemgetter(0)):
println(
" case {}: return {}; // {}".format(
hexlit(code), len(converted), codepoint_table.name(code)
@ -860,9 +856,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
println("{")
println(" switch(ch) {")
for (code, converted) in sorted(
unconditional_toupper.items(), key=itemgetter(0)
):
for code, converted in sorted(unconditional_toupper.items(), key=itemgetter(0)):
println(
" case {}: // {}".format(hexlit(code), codepoint_table.name(code))
)
@ -1306,7 +1300,7 @@ def make_unicode_file(
println("bool")
println("js::unicode::{}(char32_t codePoint)".format(name))
println("{")
for (from_code, to_code) in int_ranges(group_set.keys()):
for from_code, to_code in int_ranges(group_set.keys()):
println(
" if (codePoint >= 0x{:X} && codePoint <= 0x{:X}) {{ // {} .. {}".format(
from_code,
@ -1381,7 +1375,7 @@ def make_unicode_file(
def getsize(data):
"""return smallest possible integer size for the given array"""
maxdata = max(data)
assert maxdata < 2 ** 32
assert maxdata < 2**32
if maxdata < 256:
return 1
@ -1421,7 +1415,7 @@ def splitbins(t):
for shift in range(maxshift + 1):
t1 = []
t2 = []
size = 2 ** shift
size = 2**shift
bincache = {}
for i in range(0, len(t), size):
@ -1445,7 +1439,7 @@ def splitbins(t):
dump(t1, t2, shift, bytes)
# exhaustively verify that the decomposition is correct
mask = 2 ** shift - 1
mask = 2**shift - 1
for i in range(len(t)):
assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
return best


@ -139,7 +139,7 @@ class OpcodeInfo:
def find_by_name(list, name):
for (n, body) in list:
for n, body in list:
if n == name:
return body


@ -165,13 +165,13 @@ def print_doc(index):
)
)
for (category_name, types) in index:
for category_name, types in index:
print(
'<h3 id="{id}">{name}</h3>'.format(
name=category_name, id=make_element_id(category_name)
)
)
for (type_name, opcodes) in types:
for type_name, opcodes in types:
if type_name:
print(
'<h4 id="{id}">{name}</h4>'.format(


@ -197,6 +197,7 @@ class Tile:
# 3 4 5
# 6 7 8
# Compute the source tiles' slice and border-width sizes
def make_src_tiles():
tiles = [Tile() for i in range(9)]


@ -124,7 +124,6 @@ def makeLookup1():
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
@ -303,7 +302,6 @@ def makeLookup3():
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)


@ -20,7 +20,7 @@ import fontforge
# generate a set of fonts, each with our special glyph at one codepoint,
# and nothing else
for codepoint in range(ord("A"), ord("D") + 1):
for (mark, width) in [("", 1500), ("2", 1800)]:
for mark, width in [("", 1500), ("2", 1800)]:
charname = chr(codepoint)
f = fontforge.font()
n = "Mark" + mark + charname
@ -41,8 +41,8 @@ for codepoint in range(ord("A"), ord("D") + 1):
for codepoint in range(ord("A"), ord("A") + 1):
for (mark, width) in [("", 1500), ("2", 1800)]:
for (uposname, upos) in [("low", -350), ("high", -50)]:
for mark, width in [("", 1500), ("2", 1800)]:
for uposname, upos in [("low", -350), ("high", -50)]:
charname = chr(codepoint)
f = fontforge.font()
n = "Mark" + mark + charname


@ -62,7 +62,7 @@ def generate(output, dataFile):
"const int32_t nsCSSProps::"
"kIDLNameSortPositionTable[eCSSProperty_COUNT] = {\n"
)
for (p, position) in ps:
for p, position in ps:
output.write(" {},\n".format(position))
output.write("};\n\n")


@ -134,7 +134,7 @@ class ReftestRunner(MozbuildObject):
hyphenation_path = os.path.join(self.topsrcdir, "intl", "locales")
for (dirpath, dirnames, filenames) in os.walk(hyphenation_path):
for dirpath, dirnames, filenames in os.walk(hyphenation_path):
for filename in filenames:
if filename.endswith(".dic"):
args.extraProfileFiles.append(os.path.join(dirpath, filename))


@ -106,7 +106,6 @@ if sys.version_info[0] == 3:
raise value_.with_traceback(tb_)
raise value_
else:
exec("def reraise_(tp_, value_, tb_=None):\n raise tp_, value_, tb_\n")
@ -652,13 +651,13 @@ class RefTest(object):
]
stepResults = {}
for (descr, step) in steps:
for descr, step in steps:
stepResults[descr] = "not run / incomplete"
startTime = datetime.now()
maxTime = timedelta(seconds=options.verify_max_time)
finalResult = "PASSED"
for (descr, step) in steps:
for descr, step in steps:
if (datetime.now() - startTime) > maxTime:
self.log.info("::: Test verification is taking too long: Giving up!")
self.log.info(
@ -730,7 +729,7 @@ class RefTest(object):
# First job is only needs-focus tests. Remaining jobs are
# non-needs-focus and chunked.
perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
for chunkNumber, jobArgs in enumerate(perProcessArgs[1:], start=1):
jobArgs[-1:-1] = [
"--focus-filter-mode=non-needs-focus",
"--total-chunks=%d" % jobsWithoutFocus,
@ -770,16 +769,16 @@ class RefTest(object):
# Output the summaries that the ReftestThread filters suppressed.
summaryObjects = [defaultdict(int) for s in summaryLines]
for t in threads:
for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
for summaryObj, (text, categories) in zip(summaryObjects, summaryLines):
threadMatches = t.summaryMatches[text]
for (attribute, description) in categories:
for attribute, description in categories:
amount = int(threadMatches.group(attribute) if threadMatches else 0)
summaryObj[attribute] += amount
amount = int(threadMatches.group("total") if threadMatches else 0)
summaryObj["total"] += amount
print("REFTEST INFO | Result summary:")
for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
for summaryObj, (text, categories) in zip(summaryObjects, summaryLines):
details = ", ".join(
[
"%d %s" % (summaryObj[attribute], description)
@ -863,7 +862,6 @@ class RefTest(object):
valgrindSuppFiles=None,
**profileArgs
):
if cmdargs is None:
cmdargs = []
cmdargs = cmdargs[:]


@ -9,7 +9,6 @@ LOCAL_INCLUDES += [
]
if CONFIG["CPU_ARCH"] == "ppc64" and CONFIG["OS_TARGET"] == "Linux":
DEFINES["TOOLCHAIN_MISS_ASM_HWCAP_H"] = True
SOURCES += [


@ -859,6 +859,7 @@ def prettyPrintDmdJson(out, j):
# Code for clamping addresses using conservative pointer analysis.
##################################################################
# Start is the address of the first byte of the block, while end is
# the address of the first byte after the final byte in the block.
class AddrRange:


@ -555,6 +555,7 @@ set_config(
@imports(_from="itertools", _import="chain")
def gradle_android_dependencies_tasks(*tasks):
"""Gradle tasks run by |mach android dependencies|."""
# The union, plus a bit more, of all of the Gradle tasks
# invoked by the android-* automation jobs.
def withoutGeckoBinaries(task):


@ -329,7 +329,6 @@ def android_geckoview_docs(
javadoc_path,
upload_message,
):
tasks = (
command_context.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS"]
if archive or upload


@ -479,7 +479,7 @@ class TestGenerateStaticPrefList(unittest.TestCase):
def test_bad(self):
"Test various pieces of bad input."
for (input_string, expected) in bad_inputs:
for input_string, expected in bad_inputs:
inp = StringIO(input_string)
try:
pref_list = yaml.safe_load(inp)


@ -956,7 +956,6 @@ def save_cache(build_environment, configure_cache):
@imports("glob")
@imports(_from="os.path", _import="exists")
def config_status_deps(build_env, build_project):
topsrcdir = build_env.topsrcdir
topobjdir = build_env.topobjdir


@ -58,7 +58,6 @@ if CONFIG["OS_TARGET"] == "WINNT":
]
if CONFIG["MOZ_WIDGET_TOOLKIT"]:
if CONFIG["MOZ_MEMORY"] and FORCE_SHARED_LIB:
pass
# TODO: SHARED_LIBRARY_LIBS go here


@ -21,7 +21,7 @@ class enumset_printer(object):
return (
("flag", gdb.Value(i).cast(self.enum_type))
for i in range(0, max_bit)
if ((bitfield & (2 ** i)) != 0)
if ((bitfield & (2**i)) != 0)
)
def to_string(self):


@ -21,23 +21,23 @@ def process_config(toml_content):
if not new_base:
new_base = b"." # relpath to '.' is '', sadly
base_line = b'\nbasepath = "%s"' % new_base
content1 = re.sub(br"^\s*basepath\s*=\s*.+", base_line, toml_content, flags=re.M)
content1 = re.sub(rb"^\s*basepath\s*=\s*.+", base_line, toml_content, flags=re.M)
# process [[paths]]
start = 0
content2 = b""
for m in re.finditer(
br"\[\[\s*paths\s*\]\].+?(?=\[|\Z)", content1, re.M | re.DOTALL
rb"\[\[\s*paths\s*\]\].+?(?=\[|\Z)", content1, re.M | re.DOTALL
):
content2 += content1[start : m.start()]
path_content = m.group()
l10n_line = re.search(br"^\s*l10n\s*=.*$", path_content, flags=re.M).group()
l10n_line = re.search(rb"^\s*l10n\s*=.*$", path_content, flags=re.M).group()
# remove variable expansions
new_reference = re.sub(br"{\s*\S+\s*}", b"", l10n_line)
new_reference = re.sub(rb"{\s*\S+\s*}", b"", l10n_line)
# make the l10n a reference line
new_reference = re.sub(br"^(\s*)l10n(\s*=)", br"\1reference\2", new_reference)
new_reference = re.sub(rb"^(\s*)l10n(\s*=)", rb"\1reference\2", new_reference)
content2 += re.sub(
br"^\s*reference\s*=.*$", new_reference, path_content, flags=re.M
rb"^\s*reference\s*=.*$", new_reference, path_content, flags=re.M
)
start = m.end()
content2 += content1[start:]
@ -45,11 +45,11 @@ def process_config(toml_content):
start = 0
content3 = b""
for m in re.finditer(
br"\[\[\s*includes\s*\]\].+?(?=\[|\Z)", content2, re.M | re.DOTALL
rb"\[\[\s*includes\s*\]\].+?(?=\[|\Z)", content2, re.M | re.DOTALL
):
content3 += content2[start : m.start()]
include_content = m.group()
m_ = re.search(br'^\s*path = "(.+?)"', include_content, flags=re.M)
m_ = re.search(rb'^\s*path = "(.+?)"', include_content, flags=re.M)
content3 += (
include_content[: m_.start(1)]
+ generate_filename(m_.group(1))


@ -452,7 +452,6 @@ def completion_fish(command_context, outfile):
cmds_opts.append(comp)
for sub in cmd.subcommands:
for opt_strs, description in sub.options.items():
comp = (
"complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' "


@ -137,7 +137,7 @@ def _patch_absolute_paths(sentry_event, topsrcdir: Path):
else:
return value
for (target_path, replacement) in (
for target_path, replacement in (
(get_state_dir(), "<statedir>"),
(str(topsrcdir), "<topsrcdir>"),
(str(Path.home()), "~"),


@ -23,7 +23,6 @@ COMPLETE = (
def process_manifest(destdir, paths, track, no_symlinks=False, defines={}):
if os.path.exists(track):
# We use the same format as install manifests for the tracking
# data.


@ -648,7 +648,6 @@ class FileRecordJSONDecoder(json.JSONDecoder):
class Manifest(object):
valid_formats = ("json",)
def __init__(self, file_records=None):


@ -62,7 +62,6 @@ def verifyIniFile(initests, directory):
found = False
for f in files:
fname = f.split("/")[-1]
if fname.endswith(".in"):
fname = ".in".join(fname.split(".in")[:-1])


@ -104,13 +104,13 @@ def android_version_code_v1(buildid, cpu_arch=None, min_sdk=0, max_sdk=0):
"android:versionCode from build ID %s: hours underflow "
"bits allotted!" % buildid
)
if base > 2 ** 17:
if base > 2**17:
raise ValueError(
"Something has gone horribly wrong: cannot calculate "
"android:versionCode from build ID %s: hours overflow "
"bits allotted!" % buildid
)
if base > 2 ** 17 - 366 * 24:
if base > 2**17 - 366 * 24:
raise ValueError(
"Running out of low order bits calculating "
"android:versionCode from build ID %s: "


@ -513,7 +513,6 @@ def artifact_toolchain(
requests.exceptions.ChunkedEncodingError,
requests.exceptions.ConnectionError,
) as e:
if isinstance(e, requests.exceptions.HTTPError):
# The relengapi proxy likes to return error 400 bad request
# which seems improbable to be due to our (simple) GET


@ -363,7 +363,6 @@ class CommonBackend(BuildBackend):
)
def _handle_webidl_collection(self, webidls):
bindings_dir = mozpath.join(self.environment.topobjdir, "dom", "bindings")
all_inputs = set(webidls.all_static_sources())


@ -121,7 +121,6 @@ class ConfigEnvironment(object):
source=None,
mozconfig=None,
):
if not source:
source = mozpath.join(topobjdir, "config.status")
self.source = source


@ -343,8 +343,8 @@ class CppEclipseBackend(CommonBackend):
for i in args["includes"]:
dirsettings += add_abs_include_path(i)
for d in args["defines"]:
assert d[:2] == u"-D" or d[:2] == u"-U"
if d[:2] == u"-U":
assert d[:2] == "-D" or d[:2] == "-U"
if d[:2] == "-U":
# gfx/harfbuzz/src uses -UDEBUG, at least on Mac
# netwerk/sctp/src uses -U__APPLE__ on Mac
# XXX We should make this code smart enough to remove existing defines.


@ -225,7 +225,7 @@ class FasterMakeBackend(MakeBackend, PartialBackend):
mk.create_rule([target]).add_dependencies(
"%s" % d[0] for d in sorted(deps, key=itemgetter(0))
)
for (merge, ref_file, l10n_file) in deps:
for merge, ref_file, l10n_file in deps:
rule = mk.create_rule([merge]).add_dependencies(
[ref_file, l10n_file] + python_deps
)


@ -873,7 +873,6 @@ class RecursiveMakeBackend(MakeBackend):
unified_files_makefile_variable="unified_files",
include_curdir_build_rules=True,
):
# In case it's a generator.
unified_source_mapping = sorted(unified_source_mapping)


@ -441,7 +441,6 @@ class VisualStudioBackend(CommonBackend):
def _create_natvis_type(
self, doc, visualizer, name, displayString, stringView=None
):
t = visualizer.appendChild(doc.createElement("Type"))
t.setAttribute("Name", name)
@ -593,7 +592,6 @@ class VisualStudioBackend(CommonBackend):
headers=[],
sources=[],
):
impl = getDOMImplementation()
doc = impl.createDocument(MSBUILD_NAMESPACE, "Project", None)


@ -751,7 +751,7 @@ class MozbuildObject(ProcessExecutionMixin):
if not psutil or not job_size:
num_jobs = cpus
else:
mem_gb = psutil.virtual_memory().total / 1024 ** 3
mem_gb = psutil.virtual_memory().total / 1024**3
from_mem = round(mem_gb / job_size)
num_jobs = max(1, min(cpus, from_mem))
print(


@ -99,7 +99,6 @@ class StaticAnalysisMonitor(object):
self._warnings_database = WarningsDatabase()
def on_warning(warning):
# Output paths relative to repository root if the paths are under repo tree
warning["filename"] = build_repo_relative_path(
warning["filename"], self._srcdir
@ -542,7 +541,6 @@ def _get_clang_tidy_command(
jobs,
fix,
):
if checks == "-*":
checks = ",".join(get_clang_tidy_config(command_context).checks)
@ -786,7 +784,6 @@ def autotest(
error_code = ret_val
if error_code != TOOLS_SUCCESS:
command_context.log(
logging.INFO,
"static-analysis",
@ -1560,7 +1557,6 @@ def get_clang_tools(
download_if_needed=True,
verbose=False,
):
rc, clang_paths = _set_clang_tools_paths(command_context)
if rc != 0:
@ -1803,7 +1799,6 @@ def _copy_clang_format_for_show_diff(
def _run_clang_format_path(
command_context, clang_format, paths, output_file, output_format
):
# Run clang-format on files or directories directly
from subprocess import CalledProcessError, check_output


@ -42,7 +42,6 @@ class LcovRecord(object):
self.lines = {}
def __iadd__(self, other):
# These shouldn't differ.
self.source_file = other.source_file
if hasattr(other, "test_name"):


@ -944,7 +944,6 @@ class ConfigureSandbox(dict):
@imports(_from='mozpack', _import='path', _as='mozpath')
"""
for value, required in ((_import, True), (_from, False), (_as, False)):
if not isinstance(value, six.string_types) and (
required or value is not None
):


@ -919,8 +919,8 @@ class CCacheStats(object):
ABSOLUTE_KEYS = {"cache_files", "cache_size", "cache_max_size"}
FORMAT_KEYS = {"cache_size", "cache_max_size"}
GiB = 1024 ** 3
MiB = 1024 ** 2
GiB = 1024**3
MiB = 1024**2
KiB = 1024
def __init__(self, output=None, has_machine_format=False):


@ -80,7 +80,6 @@ class Clobberer(object):
# Object directory clobber older than current is fine.
if os.path.getmtime(self.src_clobber) <= os.path.getmtime(self.obj_clobber):
return False
return True
@ -232,7 +231,7 @@ class Clobberer(object):
self.remove_objdir(False)
print("Successfully completed auto clobber.", file=fh)
return True, True, None
except (IOError) as error:
except IOError as error:
return (
True,
False,


@ -231,7 +231,7 @@ def memory(**kwargs) -> DoctorCheck:
"""Check the host machine has the recommended memory to develop Firefox."""
memory = psutil.virtual_memory().total
# Convert to gigabytes.
memory_GB = memory / 1024 ** 3.0
memory_GB = memory / 1024**3.0
if memory_GB < MEMORY_THRESHOLD:
status = CheckStatus.WARNING
desc = "%.1fGB of physical memory, <%.1fGB" % (memory_GB, MEMORY_THRESHOLD)
@ -266,8 +266,8 @@ def storage_freespace(topsrcdir: str, topobjdir: str, **kwargs) -> List[DoctorCh
try:
usage = psutil.disk_usage(mount)
freespace, size = usage.free, usage.total
freespace_GB = freespace / 1024 ** 3
size_GB = size / 1024 ** 3
freespace_GB = freespace / 1024**3
size_GB = size / 1024**3
if freespace_GB < FREESPACE_THRESHOLD:
status = CheckStatus.WARNING
desc.append(


@ -490,7 +490,6 @@ class LinkFlags(BaseCompileFlags):
not self._context.config.substs.get("MOZ_DEBUG"),
]
):
if self._context.config.substs.get("MOZ_OPTIMIZE"):
flags.append("-OPT:REF,ICF")


@ -172,7 +172,6 @@ class TreeMetadataEmitter(LoggingMixin):
yield o
def _emit_libs_derived(self, contexts):
# First aggregate idl sources.
webidl_attrs = [
("GENERATED_EVENTS_WEBIDL_FILES", lambda c: c.generated_events_sources),
@ -1461,15 +1460,11 @@ class TreeMetadataEmitter(LoggingMixin):
if mozpath.split(base)[0] == "res":
has_resources = True
for f in files:
if (
var
in (
"FINAL_TARGET_PP_FILES",
"OBJDIR_PP_FILES",
"LOCALIZED_PP_FILES",
)
and not isinstance(f, SourcePath)
):
if var in (
"FINAL_TARGET_PP_FILES",
"OBJDIR_PP_FILES",
"LOCALIZED_PP_FILES",
) and not isinstance(f, SourcePath):
raise SandboxValidationError(
("Only source directory paths allowed in " + "%s: %s")
% (var, f),
@ -1679,7 +1674,7 @@ class TreeMetadataEmitter(LoggingMixin):
if not (generated_files or localized_generated_files):
return
for (localized, gen) in (
for localized, gen in (
(False, generated_files),
(True, localized_generated_files),
):


@ -531,7 +531,6 @@ class BuildReaderError(Exception):
other_error=None,
sandbox_called_error=None,
):
self.file_stack = file_stack
self.trace = trace
self.sandbox_called_error = sandbox_called_error


@ -297,7 +297,6 @@ def process_gn_config(
# Process all targets from the given gn project and its dependencies.
for target_fullname, spec in six.iteritems(targets):
target_path, target_name = target_info(target_fullname)
context_attrs = {}
@ -396,7 +395,7 @@ def process_gn_config(
".mm": ("CMMFLAGS", ["cflags", "cflags_objcc"]),
}
variables = (suffix_map[e] for e in extensions if e in suffix_map)
for (var, flag_keys) in variables:
for var, flag_keys in variables:
flags = [
_f for _k in flag_keys for _f in spec.get(_k, []) if _f in mozilla_flags
]
@ -522,7 +521,6 @@ def write_mozbuild(
mozilla_flags,
write_mozbuild_variables,
):
all_mozbuild_results = []
for gn_config in gn_configs:
@ -626,7 +624,6 @@ def write_mozbuild(
("OS_TARGET", "CPU_ARCH"),
("OS_TARGET", "CPU_ARCH", "MOZ_X11"),
):
conditions = set()
for args in dirs_by_config.keys():
cond = tuple(((k, dict(args).get(k) or "") for k in attrs))


@ -98,7 +98,6 @@ class DeprecatedJarManifest(Exception):
class JarManifestParser(object):
ignore = re.compile("\s*(\#.*)?$")
jarline = re.compile(
"""
@ -210,7 +209,6 @@ class JarMaker(object):
def __init__(
self, outputFormat="flat", useJarfileManifest=True, useChromeManifest=False
):
self.outputFormat = outputFormat
self.useJarfileManifest = useJarfileManifest
self.useChromeManifest = useChromeManifest


@ -936,7 +936,6 @@ def gtest(
debugger,
debugger_args,
):
# We lazy build gtest because it's slow to link
try:
command_context.config_environment


@ -286,7 +286,6 @@ class MozconfigLoader(object):
in_variable = None
for line in output.splitlines():
if not line:
continue


@ -13,7 +13,6 @@ from mozbuild.repackaging.application_ini import get_application_ini_value
def repackage_dmg(infile, output):
if not tarfile.is_tarfile(infile):
raise Exception("Input file %s is not a valid tarfile." % infile)


@ -20,7 +20,6 @@ _MSI_ARCH = {
def update_wsx(wfile, pvalues):
parsed = minidom.parse(wfile)
# construct a dictinary for the pre-processing options


@ -13,7 +13,6 @@ from mozbuild.bootstrap import bootstrap_toolchain
def repackage_pkg(infile, output):
if not tarfile.is_tarfile(infile):
raise Exception("Input file %s is not a valid tarfile." % infile)


@ -154,7 +154,7 @@ langpack-contributors = { "" }
self.assertEqual(len(description), 132)
def test_get_version_maybe_buildid(self):
for (app_version, buildid, expected_version) in [
for app_version, buildid, expected_version in [
("109", "", "109"),
("109.0", "", "109.0"),
("109.0.0", "", "109.0.0"),


@ -1292,7 +1292,7 @@ class TestRecursiveMakeBackend(BackendTester):
("not-installed", "not-installed.prog"),
]
prefix = "PROGRAM = "
for (subdir, expected_program) in expected:
for subdir, expected_program in expected:
with io.open(os.path.join(env.topobjdir, subdir, "backend.mk"), "r") as fh:
lines = fh.readlines()
program = [


@ -4,6 +4,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# For more complex and repetitive things, we can create templates
@template
def check_compiler_flag(flag):
@ -18,6 +19,7 @@ def check_compiler_flag(flag):
check_compiler_flag("-Werror=foobar")
# Normal functions can be used in @depends functions.
def fortytwo():
return 42
@ -45,6 +47,7 @@ def check(value):
set_config("TEMPLATE_VALUE_2", check)
# Normal functions can use @imports too to import modules.
@imports("sys")
def platform():

Some files were not shown because too many files have changed in this diff.