bug 1463425 - autopep8 on config/ r=gps

MozReview-Commit-ID: EaTAhH2CAee

--HG--
extra : rebase_source : f278cd9fc6e8f9db720c1430121ba91e0417c9b9
Sylvestre Ledru 2018-05-22 00:01:01 +02:00
parent dcfef841a7
commit 3d085810e5
20 changed files with 814 additions and 720 deletions
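A change of this shape is normally produced mechanically by running autopep8 over the directory and committing the result. A minimal sketch of the idea (the exact autopep8 version and flags used for this commit are not recorded in the message, so treat the details as assumptions):

    # Reformat every .py file under config/ in place, PEP 8 style
    # (roughly equivalent to: autopep8 --in-place --recursive config/).
    import os
    import autopep8

    for dirpath, dirnames, filenames in os.walk('config'):
        for name in filenames:
            if name.endswith('.py'):
                path = os.path.join(dirpath, name)
                with open(path) as f:
                    fixed = autopep8.fix_code(f.read())
                with open(path, 'w') as f:
                    f.write(fixed)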


@@ -10,128 +10,129 @@ from mozbuild.util import lock_file
class ZipFile(zipfile.ZipFile):
    """ Class with methods to open, read, write, close, list zip files.

    Subclassing zipfile.ZipFile to allow for overwriting of existing
    entries, though only for writestr, not for write.
    """

    def __init__(self, file, mode="r", compression=zipfile.ZIP_STORED,
                 lock=False):
        if lock:
            assert isinstance(file, basestring)
            self.lockfile = lock_file(file + '.lck')
        else:
            self.lockfile = None

        if mode == 'a' and lock:
            # appending to a file which doesn't exist fails, but we can't check
            # existence until we hold the lock
            if (not os.path.isfile(file)) or os.path.getsize(file) == 0:
                mode = 'w'

        zipfile.ZipFile.__init__(self, file, mode, compression)
        self._remove = []
        self.end = self.fp.tell()
        self.debug = 0

    def writestr(self, zinfo_or_arcname, bytes):
        """Write contents into the archive.

        The contents is the argument 'bytes', 'zinfo_or_arcname' is either
        a ZipInfo instance or the name of the file in the archive.
        This method is overloaded to allow overwriting existing entries.
        """
        if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
            zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname,
                                    date_time=time.localtime(time.time()))
            zinfo.compress_type = self.compression
            # Add some standard UNIX file access permissions (-rw-r--r--).
            zinfo.external_attr = (0x81a4 & 0xFFFF) << 16L
        else:
            zinfo = zinfo_or_arcname

        # Now to the point why we overwrote this in the first place,
        # remember the entry numbers if we already had this entry.
        # Optimizations:
        # If the entry to overwrite is the last one, just reuse that.
        # If we store uncompressed and the new content has the same size
        # as the old, reuse the existing entry.
        doSeek = False  # store if we need to seek to the eof after overwriting
        if self.NameToInfo.has_key(zinfo.filename):
            # Find the last ZipInfo with our name.
            # Last, because that's catching multiple overwrites
            i = len(self.filelist)
            while i > 0:
                i -= 1
                if self.filelist[i].filename == zinfo.filename:
                    break
            zi = self.filelist[i]
            if ((zinfo.compress_type == zipfile.ZIP_STORED
                 and zi.compress_size == len(bytes))
                    or (i + 1) == len(self.filelist)):
                # make sure we're allowed to write, otherwise done by writestr below
                self._writecheck(zi)
                # overwrite existing entry
                self.fp.seek(zi.header_offset)
                if (i + 1) == len(self.filelist):
                    # this is the last item in the file, just truncate
                    self.fp.truncate()
                else:
                    # we need to move to the end of the file afterwards again
                    doSeek = True
                # unhook the current zipinfo, the writestr of our superclass
                # will add a new one
                self.filelist.pop(i)
                self.NameToInfo.pop(zinfo.filename)
            else:
                # Couldn't optimize, sadly, just remember the old entry for removal
                self._remove.append(self.filelist.pop(i))
        zipfile.ZipFile.writestr(self, zinfo, bytes)
        self.filelist.sort(lambda l, r: cmp(l.header_offset, r.header_offset))
        if doSeek:
            self.fp.seek(self.end)
        self.end = self.fp.tell()

    def close(self):
        """Close the file, and for mode "w" and "a" write the ending
        records.

        Overwritten to compact overwritten entries.
        """
        if not self._remove:
            # we don't have anything special to do, let's just call base
            r = zipfile.ZipFile.close(self)
            self.lockfile = None
            return r

        if self.fp.mode != 'r+b':
            # adjust file mode if we originally just wrote, now we rewrite
            self.fp.close()
            self.fp = open(self.filename, 'r+b')
        all = map(lambda zi: (zi, True), self.filelist) + \
            map(lambda zi: (zi, False), self._remove)
        all.sort(lambda l, r: cmp(l[0].header_offset, r[0].header_offset))
        # empty _remove for multiple closes
        self._remove = []

        lengths = [all[i+1][0].header_offset - all[i][0].header_offset
                   for i in xrange(len(all)-1)]
        lengths.append(self.end - all[-1][0].header_offset)
        to_pos = 0
        for (zi, keep), length in zip(all, lengths):
            if not keep:
                continue
            oldoff = zi.header_offset
            # python <= 2.4 has file_offset
            if hasattr(zi, 'file_offset'):
                zi.file_offset = zi.file_offset + to_pos - oldoff
            zi.header_offset = to_pos
            self.fp.seek(oldoff)
            content = self.fp.read(length)
            self.fp.seek(to_pos)
            self.fp.write(content)
            to_pos += length
        self.fp.truncate()
        zipfile.ZipFile.close(self)
        self.lockfile = None
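The point of the subclass is that writestr() replaces an existing entry instead of appending a duplicate, and close() compacts the archive afterwards. A minimal usage sketch (hypothetical jar and entry names; Python 2, like the module itself):

    import zipfile
    from MozZipFile import ZipFile

    jar = ZipFile('test.jar', 'w', zipfile.ZIP_STORED)
    jar.writestr('chrome/sample.txt', 'first version')
    # Writing the same name again overwrites the entry in place (reusing
    # it when stored uncompressed at the same size, or when it is the
    # last entry) rather than appending a duplicate record.
    jar.writestr('chrome/sample.txt', 'second version!')
    jar.close()  # compacts any superseded entries on the way out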


@@ -17,7 +17,7 @@ import sys
from mozversioncontrol import get_repository_from_env

scriptname = os.path.basename(__file__)
expected_encoding = 'ascii'

# The following files don't define JSErrorFormatString.
@@ -26,13 +26,16 @@ ignore_files = [
    'js/xpconnect/src/xpc.msg',
]


def log_pass(filename, text):
    print('TEST-PASS | {} | {} | {}'.format(scriptname, filename, text))


def log_fail(filename, text):
    print('TEST-UNEXPECTED-FAIL | {} | {} | {}'.format(scriptname, filename,
                                                       text))


def check_single_file(filename):
    with open(filename, 'rb') as f:
        data = f.read()
@@ -44,6 +47,7 @@ def check_single_file(filename):
    log_pass(filename, 'ok')
    return True


def check_files():
    result = True
@@ -58,11 +62,13 @@ def check_files():
    return result


def main():
    if not check_files():
        sys.exit(1)
    sys.exit(0)


if __name__ == '__main__':
    main()


@@ -12,15 +12,18 @@ from __future__ import print_function
import sys
import re

scriptname = os.path.basename(__file__)
topsrcdir = os.path.dirname(os.path.dirname(__file__))


def log_pass(text):
    print('TEST-PASS | {} | {}'.format(scriptname, text))


def log_fail(text):
    print('TEST-UNEXPECTED-FAIL | {} | {}'.format(scriptname, text))


def check_opcode():
    sys.path.insert(0, os.path.join(topsrcdir, 'js', 'src', 'vm'))
    import opcode
@@ -33,11 +36,13 @@ def check_opcode():
    log_pass('ok')
    return True


def main():
    if not check_opcode():
        sys.exit(1)
    sys.exit(0)


if __name__ == '__main__':
    main()


@@ -37,7 +37,8 @@ reArgType = "(?P<type>[\w\s:*&]+)"
reArgName = "(?P<name>\s\w+)"
reArgDefault = "(?P<default>(?:\s=[^,)]+)?)"
reAfterArg = "(?=[,)])"
reMatchArg = re.compile(reBeforeArg + reArgType +
                        reArgName + reArgDefault + reAfterArg)


def get_normalized_signatures(signature, fileAnnot=None):
@@ -58,7 +59,8 @@ def get_normalized_signatures(signature, fileAnnot=None):
        archs = [fileAnnot['arch']]

    if 'DEFINED_ON(' in signature:
        archs = re.sub(
            r'.*DEFINED_ON\((?P<archs>[^()]*)\).*', '\g<archs>', signature).split(',')
        archs = [a.strip() for a in archs]
        signature = re.sub(r'\s+DEFINED_ON\([^()]*\)', '', signature)
@@ -157,7 +159,8 @@ def get_macroassembler_definitions(filename):
            line = re.sub(r'//.*', '', line)
            if line.startswith('{') or line.strip() == "{}":
                if 'MacroAssembler::' in lines:
                    signatures.extend(
                        get_normalized_signatures(lines, fileAnnot))
                if line.strip() != "{}":  # Empty declaration, no need to declare
                    # a new code section
                    code_section = True
@@ -244,7 +247,8 @@ def generate_file_content(signatures):
        elif len(archs.symmetric_difference(all_shared_architecture_names)) == 0:
            output.append(s + ' PER_SHARED_ARCH;\n')
        else:
            output.append(
                s + ' DEFINED_ON(' + ', '.join(sorted(archs)) + ');\n')
        for a in sorted(archs):
            a = a.replace('_', '-')
            masm = '%s/MacroAssembler-%s' % (a, a)
@@ -271,8 +275,10 @@ def check_style():
            filepath = os.path.join(dirpath, filename).replace('\\', '/')
            if filepath.endswith('MacroAssembler.h'):
                decls = append_signatures(
                    decls, get_macroassembler_declaration(filepath))
                defs = append_signatures(
                    defs, get_macroassembler_definitions(filepath))

    if not decls or not defs:
        raise Exception("Did not find any definitions or declarations")


@@ -47,11 +47,10 @@ else:
    print("There are more occurrences of /{0}/ than expected. We're trying "
          "to prevent an increase in the number of {1}'s, using {2} if "
          "possible. If it is unavoidable, you should update the expected "
          "count {3}.".format(search_string, search_string, replacement,
                              error_location))
    print("Expected: {0}; found: {1}".format(expected_count, count))
    for k in sorted(details):
        print("Found {0} occurrences in {1}".format(details[k], k))
    sys.exit(-1)


@@ -45,12 +45,12 @@ import sys
# We don't bother checking files in these directories, because they're (a) auxiliary or (b)
# imported code that doesn't follow our coding style.
ignored_js_src_dirs = [
    'js/src/config/',  # auxiliary stuff
    'js/src/ctypes/libffi/',  # imported code
    'js/src/devtools/',  # auxiliary stuff
    'js/src/editline/',  # imported code
    'js/src/gdb/',  # auxiliary stuff
    'js/src/vtune/'  # imported code
]

# We ignore #includes of these files, because they don't follow the usual rules.
@@ -58,9 +58,9 @@ included_inclnames_to_ignore = set([
    'ffi.h',  # generated in ctypes/libffi/
    'devtools/sharkctl.h',  # we ignore devtools/ in general
    'devtools/Instruments.h',  # we ignore devtools/ in general
    'double-conversion/double-conversion.h',  # strange MFBT case
    'javascript-trace.h',  # generated in $OBJDIR if HAVE_DTRACE is defined
    'frontend/ReservedWordsGenerated.h',  # generated in $OBJDIR
    'gc/StatsPhasesGenerated.h',  # generated in $OBJDIR
    'gc/StatsPhasesGenerated.cpp',  # generated in $OBJDIR
    'jit/LOpcodes.h',  # generated in $OBJDIR
@@ -92,7 +92,7 @@ included_inclnames_to_ignore = set([
    'unicode/ucol.h',  # ICU
    'unicode/udat.h',  # ICU
    'unicode/udatpg.h',  # ICU
    'unicode/udisplaycontext.h',  # ICU
    'unicode/uenum.h',  # ICU
    'unicode/uloc.h',  # ICU
    'unicode/unistr.h',  # ICU
@@ -110,8 +110,9 @@ included_inclnames_to_ignore = set([
# ignore #includes of them when checking #include ordering.
oddly_ordered_inclnames = set([
    'ctypes/typedefs.h',  # Included multiple times in the body of ctypes/CTypes.h
    'frontend/BinSource-auto.h',  # Included in the body of frontend/BinSource.h
    # Included in the body of frontend/TokenStream.h
    'frontend/ReservedWordsGenerated.h',
    'gc/StatsPhasesGenerated.h',  # Included in the body of gc/Statistics.h
    'gc/StatsPhasesGenerated.cpp',  # Included in the body of gc/Statistics.cpp
    'psapi.h',  # Must be included after "util/Windows.h" on Windows
@@ -317,7 +318,8 @@ def check_style(enable_fixup):
                with open(filename, 'w') as f:
                    f.write(code.to_source())

            check_file(filename, inclname, file_kind, code,
                       all_inclnames, included_h_inclnames)

        edges[inclname] = included_h_inclnames

@@ -326,7 +328,7 @@ def check_style(enable_fixup):
    # Compare expected and actual output.
    difflines = difflib.unified_diff(expected_output, actual_output,
                                     fromfile='check_spidermonkey_style.py expected output',
                                     tofile='check_spidermonkey_style.py actual output')
    ok = True
    for diffline in difflines:
        ok = False
@@ -435,6 +437,7 @@ class CppBlock(object):
    Each kid is either an Include (representing a #include), OrdinaryCode, or
    a nested CppBlock.'''

    def __init__(self, start_line=""):
        self.start = start_line
        self.end = ''
@@ -508,7 +511,8 @@ class CppBlock(object):
                    cutoff = last_include_index + 1
                    if should_try_to_sort(includes):
                        output.extend(pretty_sorted_includes(
                            includes) + batch[cutoff:])
                    else:
                        output.extend(batch)
                del batch[:]
@@ -540,6 +544,7 @@ class CppBlock(object):

class OrdinaryCode(object):
    ''' A list of lines of code that aren't #include/#if/#else/#endif lines. '''

    def __init__(self, lines=None):
        self.lines = lines if lines is not None else []
@@ -566,14 +571,16 @@ def read_file(f):
        m = re.match(r'(\s*#\s*include\s+)"([^"]*)"(.*)', line)
        if m is not None:
            prefix, inclname, suffix = m.groups()
            block_stack[-1].kids.append(Include(prefix,
                                                inclname, suffix, linenum, is_system=False))
            continue

        # Look for a |#include <...>| line.
        m = re.match(r'(\s*#\s*include\s+)<([^>]*)>(.*)', line)
        if m is not None:
            prefix, inclname, suffix = m.groups()
            block_stack[-1].kids.append(Include(prefix,
                                                inclname, suffix, linenum, is_system=True))
            continue

        # Look for a |#{if,ifdef,ifndef}| line.
@@ -601,7 +608,8 @@ def read_file(f):
            # Close the current block.
            block_stack.pop().end = line
            if len(block_stack) == 0:
                raise ValueError(
                    "#endif without #if at line " + str(linenum))
            continue

        # Otherwise, we have an ordinary line.
@@ -648,7 +656,8 @@ def check_file(filename, inclname, file_kind, code, all_inclnames, included_h_in
        # Check a file doesn't #include itself. (We do this here because the cycle
        # detection below doesn't detect this case.)
        if inclname == include.inclname:
            error(filename, include.linenum,
                  'the file includes itself')

    def check_includes_order(include1, include2):
        '''Check the ordering of two #include statements.'''
@@ -689,6 +698,7 @@ def find_cycles(all_inclnames, edges):
    def draw_SCC(c):
        cset = set(c)
        drawn = set()

        def draw(v, indent):
            out(' ' * indent + ('-> ' if indent else ' ') + v)
            if v in drawn:
@@ -704,7 +714,8 @@ def find_cycles(all_inclnames, edges):
    for scc in sorted(SCCs):
        if len(scc) != 1:
            if not have_drawn_an_SCC:
                error('(multiple files)', None,
                      'header files form one or more cycles')
                have_drawn_an_SCC = True

            draw_SCC(scc)


@@ -94,9 +94,9 @@ def main():
        r'memalign',
        # These three aren't available on all Linux configurations.
        # r'posix_memalign',
        # r'aligned_alloc',
        # r'valloc',
    ]

    if args.aggressive:
@@ -159,7 +159,6 @@ def main():
    # Try to give more precise information about the offending code.
    emit_line_info = True

    # Check that all functions we expect are used in jsutil.cpp. (This will
    # fail if the function-detection code breaks at any point.)
    for fn in alloc_fns_unescaped:
@@ -199,7 +198,8 @@ def main():
        for line in lines:
            m = re.search(alloc_lines_re, line)
            if m:
                print('check_vanilla_allocations.py:',
                      m.group(1), 'called at', m.group(3))

    if has_failed:
        sys.exit(1)


@@ -9,6 +9,7 @@
import sys
import os


def get_build_entries(root_path):
    """ Iterates through the root_path, creating a list for each file and
    directory. Excludes any file paths ending with channel-prefs.js.
@@ -39,6 +40,7 @@ def get_build_entries(root_path):
    return rel_file_path_list, rel_dir_path_list


def generate_precomplete(root_path):
    """ Creates the precomplete file containing the remove and rmdir
    application update instructions. The given directory is used
@@ -50,7 +52,7 @@ def generate_precomplete(root_path):
        root_path = os.path.abspath(os.path.join(root_path, '../../'))
        rel_path_precomplete = "Contents/Resources/precomplete"

    precomplete_file_path = os.path.join(root_path, rel_path_precomplete)
    # Open the file so it exists before building the list of files and open it
    # in binary mode to prevent OS specific line endings.
    precomplete_file = open(precomplete_file_path, "wb")
@@ -63,5 +65,6 @@ def generate_precomplete(root_path):
    precomplete_file.close()


if __name__ == "__main__":
    generate_precomplete(os.getcwd())


@@ -29,138 +29,151 @@ from optparse import OptionParser
#####################################################################
# Utility functions
#####################################################################


def run(args, stdin=None):
    class ThreadWorker(threading.Thread):
        def __init__(self, pipe):
            super(ThreadWorker, self).__init__()
            self.all = ""
            self.pipe = pipe
            self.setDaemon(True)

        def run(self):
            while True:
                line = self.pipe.readline()
                if line == '':
                    break
                else:
                    self.all += line

    try:
        if type(args) == str:
            args = shlex.split(args)

        args = [str(a) for a in args]  # convert to strs

        stdin_pipe = subprocess.PIPE if stdin else None
        proc = subprocess.Popen(args, stdin=stdin_pipe,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if stdin_pipe:
            proc.stdin.write(stdin)
            proc.stdin.close()

        stdout_worker = ThreadWorker(proc.stdout)
        stderr_worker = ThreadWorker(proc.stderr)
        stdout_worker.start()
        stderr_worker.start()

        proc.wait()
        stdout_worker.join()
        stderr_worker.join()

    except KeyboardInterrupt as e:
        sys.exit(-1)

    stdout, stderr = stdout_worker.all, stderr_worker.all
    result = (stdout, stderr, proc.returncode)
    return result


def get_js_files():
    (out, err, exit) = run('find ../jit-test/tests -name "*.js"')
    if (err, exit) != ("", 0):
        sys.exit("Wrong directory, run from an objdir")
    return out.split()


#####################################################################
# Blacklisting
#####################################################################


def in_blacklist(sig):
    return sig in blacklist


def add_to_blacklist(sig):
    blacklist[sig] = blacklist.get(sig, 0)
    blacklist[sig] += 1


# How often is a particular line important for this.
def count_lines():
    """Keep track of the amount of times individual lines occur, in order to
    prioritize the errors which occur most frequently."""
    counts = {}
    for string, count in blacklist.items():
        for line in string.split("\n"):
            counts[line] = counts.get(line, 0) + count

    lines = []
    for k, v in counts.items():
        lines.append("{0:6}: {1}".format(v, k))

    lines.sort()

    countlog = file("../OOM_count_log", "w")
    countlog.write("\n".join(lines))
    countlog.flush()
    countlog.close()


#####################################################################
# Output cleaning
#####################################################################


def clean_voutput(err):
    # Skip what we can't reproduce
    err = re.sub(r"^--\d+-- run: /usr/bin/dsymutil \"shell/js\"$",
                 "", err, flags=re.MULTILINE)
    err = re.sub(r"^==\d+==", "", err, flags=re.MULTILINE)
    err = re.sub(r"^\*\*\d+\*\*", "", err, flags=re.MULTILINE)
    err = re.sub(r"^\s+by 0x[0-9A-Fa-f]+: ", "by: ", err, flags=re.MULTILINE)
    err = re.sub(r"^\s+at 0x[0-9A-Fa-f]+: ", "at: ", err, flags=re.MULTILINE)
    err = re.sub(
        r"(^\s+Address 0x)[0-9A-Fa-f]+( is not stack'd)", r"\1\2", err, flags=re.MULTILINE)
    err = re.sub(r"(^\s+Invalid write of size )\d+",
                 r"\1x", err, flags=re.MULTILINE)
    err = re.sub(r"(^\s+Invalid read of size )\d+",
                 r"\1x", err, flags=re.MULTILINE)
    err = re.sub(r"(^\s+Address 0x)[0-9A-Fa-f]+( is )\d+( bytes inside a block of size )[0-9,]+( free'd)",
                 r"\1\2\3\4", err, flags=re.MULTILINE)

    # Skip the repeating bit due to the segfault
    lines = []
    for l in err.split('\n'):
        if l == " Process terminating with default action of signal 11 (SIGSEGV)":
            break
        lines.append(l)
    err = '\n'.join(lines)

    return err


def remove_failed_allocation_backtraces(err):
    lines = []

    add = True
    for l in err.split('\n'):

        # Set start and end conditions for including text
        if l == " The site of the failed allocation is:":
            add = False
        elif l[:2] not in ['by: ', 'at:']:
            add = True

        if add:
            lines.append(l)

    err = '\n'.join(lines)

    return err


def clean_output(err):
    err = re.sub(r"^js\(\d+,0x[0-9a-f]+\) malloc: \*\*\* error for object 0x[0-9a-f]+: pointer being freed was not allocated\n\*\*\* set a breakppoint in malloc_error_break to debug\n$",
                 "pointer being freed was not allocated", err, flags=re.MULTILINE)

    return err


#####################################################################
@@ -168,10 +181,10 @@ def clean_output(err):
#####################################################################

command_template = 'shell/js' \
    + ' -m -j -p' \
    + ' -e "const platform=\'darwin\'; const libdir=\'../jit-test/lib/\';"' \
    + ' -f ../jit-test/lib/prolog.js' \
    + ' -f {0}'


# Blacklists are things we don't want to see in our logs again (though we do
@@ -179,15 +192,15 @@ command_template = 'shell/js' \
# logs again, principally because the information we have isn't enough.

blacklist = {}
# 1 means OOM if the shell hasn't launched yet.
add_to_blacklist(r"('', '', 1)")
add_to_blacklist(r"('', 'out of memory\n', 1)")

whitelist = set()
whitelist.add(r"('', 'out of memory\n', -11)")  # -11 means OOM
whitelist.add(r"('', 'out of memory\nout of memory\n', -11)")


#####################################################################
# Program
#####################################################################

@@ -196,157 +209,162 @@ whitelist.add(r"('', 'out of memory\nout of memory\n', -11)")
parser = OptionParser(usage=usage)
parser.add_option("-r", "--regression", action="store", metavar="REGRESSION_COUNT", help=help,
                  type="int", dest="regression", default=None)

(OPTIONS, args) = parser.parse_args()

if OPTIONS.regression != None:
    # TODO: This should be expanded as we get a better hang of the OOM problems.
    # For now, we'll just check that the number of OOMs in one short file does not
    # increase.
    files = ["../jit-test/tests/arguments/args-createontrace.js"]
else:
    files = get_js_files()

    # Use a command-line arg to reduce the set of files
    if len(args):
        files = [f for f in files if f.find(args[0]) != -1]

if OPTIONS.regression == None:
    # Don't use a logfile, this is automated for tinderbox.
    log = file("../OOM_log", "w")

num_failures = 0
for f in files:

    # Run it once to establish boundaries
    command = (command_template + ' -O').format(f)
    out, err, exit = run(command)
    max = re.match(".*OOM max count: (\d+).*", out,
                   flags=re.DOTALL).groups()[0]
    max = int(max)

    # OOMs don't recover well for the first 20 allocations or so.
    # TODO: revisit this.
    for i in range(20, max):

        if OPTIONS.regression == None:
            print("Testing allocation {0}/{1} in {2}".format(i, max, f))
        else:
            # something short for tinderbox, no space or \n
            sys.stdout.write('.')

        command = (command_template + ' -A {0}').format(f, i)
        out, err, exit = run(command)

        # Success (5 is SM's exit code for controlled errors)
        if exit == 5 and err.find("out of memory") != -1:
            continue

        # Failure
        else:

            if OPTIONS.regression != None:
                # Just count them
                num_failures += 1
                continue

            #########################################################################
            # The regression tests ends above. The rest of this is for running the
            # script manually.
            #########################################################################

            problem = str((out, err, exit))
            if in_blacklist(problem) and problem not in whitelist:
                add_to_blacklist(problem)
                continue

            add_to_blacklist(problem)

            # Get valgrind output for a good stack trace
            vcommand = "valgrind --dsymutil=yes -q --log-file=OOM_valgrind_log_file " + command
            run(vcommand)
            vout = file("OOM_valgrind_log_file").read()
            vout = clean_voutput(vout)
            sans_alloc_sites = remove_failed_allocation_backtraces(vout)

            # Don't print duplicate information
            if in_blacklist(sans_alloc_sites):
                add_to_blacklist(sans_alloc_sites)
                continue

            add_to_blacklist(sans_alloc_sites)

            log.write("\n")
            log.write("\n")
            log.write(
                "=========================================================================")
            log.write("\n")
            log.write("An allocation failure at\n\tallocation {0}/{1} in {2}\n\t"
                      "causes problems (detected using bug 624094)"
                      .format(i, max, f))
            log.write("\n")
            log.write("\n")

            log.write(
                "Command (from obj directory, using patch from bug 624094):\n " + command)
            log.write("\n")
            log.write("\n")
            log.write("stdout, stderr, exitcode:\n " + problem)
            log.write("\n")
            log.write("\n")

            double_free = err.find(
                "pointer being freed was not allocated") != -1
            oom_detected = err.find("out of memory") != -1
            multiple_oom_detected = err.find(
                "out of memory\nout of memory") != -1
            segfault_detected = exit == -11

            log.write("Diagnosis: ")
            log.write("\n")
            if multiple_oom_detected:
                log.write(" - Multiple OOMs reported")
                log.write("\n")
            if segfault_detected:
                log.write(" - segfault")
                log.write("\n")
            if not oom_detected:
                log.write(" - No OOM checking")
                log.write("\n")
            if double_free:
                log.write(" - Double free")
                log.write("\n")

            log.write("\n")
            log.write("Valgrind info:\n" + vout)
            log.write("\n")
            log.write("\n")
            log.flush()

    if OPTIONS.regression == None:
        count_lines()

print()

# Do the actual regression check
if OPTIONS.regression != None:
    expected_num_failures = OPTIONS.regression

    if num_failures != expected_num_failures:

        print("TEST-UNEXPECTED-FAIL |", end='')
        if num_failures > expected_num_failures:
            print("More out-of-memory errors were found ({0}) than expected ({1}). "
                  "This probably means an allocation site has been added without a "
                  "NULL-check. If this is unavoidable, you can account for it by "
                  "updating Makefile.in.".format(
                      num_failures, expected_num_failures),
                  end='')
        else:
            print("Congratulations, you have removed {0} out-of-memory error(s) "
                  "({1} remain)! Please account for it by updating Makefile.in."
                  .format(expected_num_failures - num_failures, num_failures),
                  end='')
        sys.exit(-1)
    else:
        print('TEST-PASS | find_OOM_errors | Found the expected number of OOM '
              'errors ({0})'.format(expected_num_failures))


@@ -2,9 +2,13 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import os
import re
import string
import sys
from mozbuild.util import FileAvoidWrite


def find_in_path(file, searchpath):
    for dir in searchpath.split(os.pathsep):
        f = os.path.join(dir, file)
@@ -12,6 +16,7 @@ def find_in_path(file, searchpath):
            return f
    return ''


def header_path(header, compiler):
    if compiler == 'gcc':
        # we use include_next on gcc
@@ -24,6 +29,8 @@ def header_path(header, compiler):

# The 'unused' arg is the output file from the file_generate action. We actually
# generate all the files in header_list


def gen_wrappers(unused, outdir, compiler, template_file, *header_list):
    template = open(template_file, 'r').read()


@@ -18,165 +18,172 @@ import sys
import shutil
import stat


def _nsinstall_internal(argv):
    usage = "usage: %prog [options] arg1 [arg2 ...] target-directory"
    p = OptionParser(usage=usage)

    p.add_option('-D', action="store_true",
                 help="Create a single directory only")
    p.add_option('-t', action="store_true",
                 help="Preserve time stamp")
    p.add_option('-m', action="store",
                 help="Set mode", metavar="mode")
    p.add_option('-d', action="store_true",
                 help="Create directories in target")
    p.add_option('-R', action="store_true",
                 help="Use relative symbolic links (ignored)")
    p.add_option('-L', action="store", metavar="linkprefix",
                 help="Link prefix (ignored)")
    p.add_option('-X', action="append", metavar="file",
                 help="Ignore a file when installing a directory recursively.")

    # The remaining arguments are not used in our tree, thus they're not
    # implemented.
    def BadArg(option, opt, value, parser):
        parser.error('option not supported: {0}'.format(opt))

    p.add_option('-C', action="callback", metavar="CWD",
                 callback=BadArg,
                 help="NOT SUPPORTED")
    p.add_option('-o', action="callback", callback=BadArg,
                 help="Set owner (NOT SUPPORTED)", metavar="owner")
    p.add_option('-g', action="callback", callback=BadArg,
                 help="Set group (NOT SUPPORTED)", metavar="group")

    (options, args) = p.parse_args(argv)

    if options.m:
        # mode is specified
        try:
            options.m = int(options.m, 8)
        except:
            sys.stderr.write('nsinstall: {0} is not a valid mode\n'
                             .format(options.m))
            return 1

    # just create one directory?
    def maybe_create_dir(dir, mode, try_again):
        dir = os.path.abspath(dir)
        if os.path.exists(dir):
            if not os.path.isdir(dir):
                print('nsinstall: {0} is not a directory'.format(dir), file=sys.stderr)
                return 1
            if mode:
                os.chmod(dir, mode)
            return 0

        try:
            if mode:
                os.makedirs(dir, mode)
            else:
                os.makedirs(dir)
        except Exception as e:
            # We might have hit EEXIST due to a race condition (see bug 463411) -- try again once
            if try_again:
                return maybe_create_dir(dir, mode, False)
            print(
                "nsinstall: failed to create directory {0}: {1}".format(dir, e))
            return 1
        else:
            return 0

    if options.X:
        options.X = [os.path.abspath(p) for p in options.X]

    if options.D:
        return maybe_create_dir(args[0], options.m, True)

    # nsinstall arg1 [...] directory
    if len(args) < 2:
        p.error('not enough arguments')

    def copy_all_entries(entries, target):
        for e in entries:
            e = os.path.abspath(e)
            if options.X and e in options.X:
                continue

            dest = os.path.join(target, os.path.basename(e))
            dest = os.path.abspath(dest)
            handleTarget(e, dest)
            if options.m:
                os.chmod(dest, options.m)

    # set up handler
    if options.d:
        # we're supposed to create directories
        def handleTarget(srcpath, targetpath):
            # target directory was already created, just use mkdir
            os.mkdir(targetpath)
    else:
        # we're supposed to copy files
        def handleTarget(srcpath, targetpath):
            if os.path.isdir(srcpath):
                if not os.path.exists(targetpath):
                    os.mkdir(targetpath)
                entries = [os.path.join(srcpath, e)
                           for e in os.listdir(srcpath)]
                copy_all_entries(entries, targetpath)
                # options.t is not relevant for directories
                if options.m:
                    os.chmod(targetpath, options.m)
            else:
                if os.path.exists(targetpath):
                    if sys.platform == "win32":
                        mozfile.remove(targetpath)
                    else:
                        os.remove(targetpath)
                if options.t:
                    shutil.copy2(srcpath, targetpath)
                else:
                    shutil.copy(srcpath, targetpath)

    # the last argument is the target directory
    target = args.pop()
    # ensure target directory (importantly, we do not apply a mode to the directory
    # because we want to copy files into it and the mode might be read-only)
    rv = maybe_create_dir(target, None, True)
    if rv != 0:
        return rv

    copy_all_entries(args, target)
    return 0


# nsinstall as a native command is always UTF-8
def nsinstall(argv):
    return _nsinstall_internal([unicode(arg, "utf-8") for arg in argv])


if __name__ == '__main__':
    # sys.argv corrupts characters outside the system code page on Windows
    # <http://bugs.python.org/issue2128>. Use ctypes instead. This is also
    # useful because switching to Unicode strings makes python use the wide
    # Windows APIs, which is what we want here since the wide APIs normally do a
    # better job at handling long paths and such.
    if sys.platform == "win32":
        import ctypes
        from ctypes import wintypes
        GetCommandLine = ctypes.windll.kernel32.GetCommandLineW
        GetCommandLine.argtypes = []
        GetCommandLine.restype = wintypes.LPWSTR

        CommandLineToArgv = ctypes.windll.shell32.CommandLineToArgvW
        CommandLineToArgv.argtypes = [
            wintypes.LPWSTR, ctypes.POINTER(ctypes.c_int)]
        CommandLineToArgv.restype = ctypes.POINTER(wintypes.LPWSTR)

        argc = ctypes.c_int(0)
        argv_arr = CommandLineToArgv(GetCommandLine(), ctypes.byref(argc))
        # The first argv will be "python", the second will be the .py file
        argv = argv_arr[1:argc.value]
    else:
        # For consistency, do it on Unix as well
        if sys.stdin.encoding is not None:
            argv = [unicode(arg, sys.stdin.encoding) for arg in sys.argv]
        else:
            argv = [unicode(arg) for arg in sys.argv]

    sys.exit(_nsinstall_internal(argv[1:]))


@@ -14,18 +14,20 @@ from __future__ import print_function
import sys
import re


def get_prerelease_suffix(version):
    """ Returns the prerelease suffix from the version string argument """
    def mfunc(m):
        return " {0} {1} {2}".format(m.group('prefix'),
                                     {'a': 'Alpha', 'b': 'Beta'}[m.group('c')],
                                     m.group('suffix'))
    result, c = re.subn(r'^(?P<prefix>(\d+\.)*\d+)(?P<c>[ab])(?P<suffix>\d+)$',
                        mfunc, version)
    if c != 1:
        return ''
    return result


if len(sys.argv) == 2:
    print(get_prerelease_suffix(sys.argv[1]))
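For example (these values mirror the unit tests for get_prerelease_suffix later in this commit):

    get_prerelease_suffix('1.2a10')     # -> ' 1.2 Alpha 10'
    get_prerelease_suffix('1.2.3b3')    # -> ' 1.2.3 Beta 3'
    get_prerelease_suffix('3.4b10pre')  # -> ''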


@@ -47,10 +47,12 @@ def main(args):
        execfile(script, frozenglobals)


# Freeze scope here ... why this makes things work I have no idea ...
frozenglobals = globals()

import sys
import os

if __name__ == '__main__':
    main(sys.argv[1:])


@@ -5,6 +5,7 @@
import os
import errno


def mtime(path):
    try:
        return os.stat(path).st_mtime
@@ -13,6 +14,7 @@ def mtime(path):
            return -1
        raise


def rebuild_check(args):
    target = args[0]
    deps = args[1:]
@@ -31,14 +33,17 @@ def rebuild_check(args):
            newer.append(dep)

    if newer and removed:
        print 'Rebuilding %s because %s changed and %s was removed' % (
            target, ', '.join(newer), ', '.join(removed))
    elif newer:
        print 'Rebuilding %s because %s changed' % (target, ', '.join(newer))
    elif removed:
        print 'Rebuilding %s because %s was removed' % (
            target, ', '.join(removed))
    else:
        print 'Rebuilding %s for an unknown reason' % target


if __name__ == '__main__':
    import sys
    rebuild_check(sys.argv[1:])
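A hypothetical invocation (made-up paths), matching the argv handling above: running

    python rebuild_check.py out/target.o src/a.c src/b.h

prints a line such as 'Rebuilding out/target.o because src/a.c changed' when a dependency's mtime() is newer than the target's.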


@@ -6,8 +6,10 @@ import buildconfig
import subprocess
import sys


def main(output, lib_file, *scripts):
    for script in scripts:
        retcode = subprocess.call(
            [sys.executable, script], cwd=buildconfig.topsrcdir)
        if retcode != 0:
            raise Exception(script + " failed")


@@ -68,7 +68,6 @@ class TestMozbuildReading(unittest.TestCase):
        self.assertEqual(set(paths.keys()), all_paths)
        self.assertGreaterEqual(len(contexts), len(paths))

    def test_orphan_file_patterns(self):
        if sys.platform == 'win32':
            raise unittest.SkipTest('failing on windows builds')
@@ -111,5 +110,6 @@ class TestMozbuildReading(unittest.TestCase):
                                 "Please update this entry." %
                                 (p, ctx.main_path))


if __name__ == '__main__':
    main()


@@ -8,12 +8,13 @@ from mozunit import main, MockedOpen
import unittest
from tempfile import mkstemp


class TestMozUnit(unittest.TestCase):
    def test_mocked_open(self):
        # Create a temporary file on the file system.
        (fd, path) = mkstemp()
        with os.fdopen(fd, 'w') as file:
            file.write('foobar')

        self.assertFalse(os.path.exists('file1'))
        self.assertFalse(os.path.exists('file2'))
@@ -82,5 +83,6 @@ class TestMozUnit(unittest.TestCase):
        # created.
        self.assertRaises(IOError, open, 'file3', 'r')


if __name__ == "__main__":
    main()


@@ -1,6 +1,9 @@
import unittest

import os
import sys
import os.path
import time
from tempfile import mkdtemp
from shutil import rmtree
import mozunit
@@ -17,10 +20,12 @@ RUN_NON_ASCII_TESTS = (sys.platform == "win32" or
                       (sys.stdin.encoding is not None and
                        codecs.lookup(sys.stdin.encoding) == codecs.lookup("utf-8")))


class TestNsinstall(unittest.TestCase):
    """
    Unit tests for nsinstall.py
    """

    def setUp(self):
        self.tmpdir = mkdtemp()
@@ -80,7 +85,8 @@ class TestNsinstall(unittest.TestCase):
        self.assert_(os.path.isfile(os.path.join(testdir, "testfile")))
        self.assert_(not os.path.exists(os.path.join(testdir, "Xfile")))
        self.assert_(os.path.isdir(os.path.join(testdir, "copieddir")))
        self.assert_(os.path.isfile(os.path.join(
            testdir, "copieddir", "testfile2")))
        self.assert_(not os.path.exists(os.path.join(testdir, "Xdir")))

    def test_nsinstall_multiple(self):
@@ -168,7 +174,8 @@ class TestNsinstall(unittest.TestCase):
            destfile = os.path.join(testdir, filename)
            self.assert_(os.path.isfile(destfile))

    # TODO: implement -R, -l, -L and test them!


if __name__ == '__main__':
    mozunit.main()


@@ -6,75 +6,77 @@ import mozunit
from printprereleasesuffix import get_prerelease_suffix


class TestGetPreReleaseSuffix(unittest.TestCase):
    """
    Unit tests for the get_prerelease_suffix function
    """

    def test_alpha_1(self):
        """test 1a1 version string"""
        self.c = get_prerelease_suffix('1a1')
        self.assertEqual(self.c, ' 1 Alpha 1')

    def test_alpha_10(self):
        """test 1.2a10 version string"""
        self.c = get_prerelease_suffix('1.2a10')
        self.assertEqual(self.c, ' 1.2 Alpha 10')

    def test_beta_3(self):
        """test 1.2.3b3 version string"""
        self.c = get_prerelease_suffix('1.2.3b3')
        self.assertEqual(self.c, ' 1.2.3 Beta 3')

    def test_beta_30(self):
        """test 1.2.3.4b30 version string"""
        self.c = get_prerelease_suffix('1.2.3.4b30')
        self.assertEqual(self.c, ' 1.2.3.4 Beta 30')

    def test_release_1(self):
        """test 1.2.3.4 version string"""
        self.c = get_prerelease_suffix('1.2.3.4')
        self.assertEqual(self.c, '')

    def test_alpha_1_pre(self):
        """test 1.2a1pre version string"""
        self.c = get_prerelease_suffix('1.2a1pre')
        self.assertEqual(self.c, '')

    def test_beta_10_pre(self):
        """test 3.4b10pre version string"""
        self.c = get_prerelease_suffix('3.4b10pre')
        self.assertEqual(self.c, '')

    def test_pre_0(self):
        """test 1.2pre0 version string"""
        self.c = get_prerelease_suffix('1.2pre0')
        self.assertEqual(self.c, '')

    def test_pre_1_b(self):
        """test 1.2pre1b version string"""
        self.c = get_prerelease_suffix('1.2pre1b')
        self.assertEqual(self.c, '')

    def test_a_a(self):
        """test 1.2aa version string"""
        self.c = get_prerelease_suffix('1.2aa')
        self.assertEqual(self.c, '')

    def test_b_b(self):
        """test 1.2bb version string"""
        self.c = get_prerelease_suffix('1.2bb')
        self.assertEqual(self.c, '')

    def test_a_b(self):
        """test 1.2ab version string"""
        self.c = get_prerelease_suffix('1.2ab')
        self.assertEqual(self.c, '')

    def test_plus(self):
        """test 1.2+ version string """
        self.c = get_prerelease_suffix('1.2+')
        self.assertEqual(self.c, '')


if __name__ == '__main__':
    mozunit.main()


@ -28,153 +28,160 @@ The content written to the jars is pseudorandom with a fixed seed.
'''
if not __file__:
__file__ = sys.argv[0]
__file__ = sys.argv[0]
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from MozZipFile import ZipFile
import zipfile
leafs = (
'firstdir/oneleaf',
'seconddir/twoleaf',
'thirddir/with/sub/threeleaf')
'firstdir/oneleaf',
'seconddir/twoleaf',
'thirddir/with/sub/threeleaf')
_lengths = map(lambda n: n * 64, [16, 64, 80])
lengths = 3
writes = 5
def givenlength(i):
'''Return a length given in the _lengths array to allow manual
tuning of which lengths of zip entries to use.
'''
return _lengths[i]
'''Return a length given in the _lengths array to allow manual
tuning of which lengths of zip entries to use.
'''
return _lengths[i]
def prod(*iterables):
    '''Tensor product of a list of iterables.

    This generator returns lists of items, one of each given
    iterable. It iterates over all possible combinations.
    '''
    for item in iterables[0]:
        if len(iterables) == 1:
            yield [item]
        else:
            for others in prod(*iterables[1:]):
                yield [item] + others
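

# Illustrative sketch, not part of the original file: prod() behaves like
# itertools.product except that it yields lists instead of tuples. This
# hypothetical helper returns [[0, 'a'], [0, 'b'], [1, 'a'], [1, 'b']].
def _prod_example():
    return list(prod([0, 1], ['a', 'b']))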
def getid(descs):
    'Convert a list of ints to a string.'
    return reduce(lambda x, y: x+'{0}{1}'.format(*tuple(y)), descs, '')
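

# Illustrative sketch, not part of the original file: getid() flattens
# (leaf, length) pairs into a digit string, which is used below to name
# the generated test methods. This hypothetical helper returns '0120'.
def _getid_example():
    return getid([(0, 1), (2, 0)])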
def getContent(length):
    'Get pseudo random content of given length.'
    rv = [None] * length
    for i in xrange(length):
        rv[i] = random.choice(letters)
    return ''.join(rv)
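

# Illustrative sketch, not part of the original file, assuming the
# module's existing imports (random, string letters): because _write()
# seeds the RNG before calling getContent(), the same (seed, length)
# pair always produces the same bytes, which is what makes the jar
# contents reproducible across runs.
def _content_example(seed=0, length=8):
    random.seed(seed)
    return getContent(length)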
def createWriter(sizer, *items):
    'Helper method to fill in tests, one set of writes, one for each item'
    locitems = copy.deepcopy(items)
    for item in locitems:
        item['length'] = sizer(item.pop('length', 0))

    def helper(self):
        mode = 'w'
        if os.path.isfile(self.f):
            mode = 'a'
        zf = ZipFile(self.f, mode, self.compression)
        for item in locitems:
            self._write(zf, **item)
        zf = None
        pass
    return helper
def createTester(name, *writes):
    '''Helper method to fill in tests, calls into a list of write
    helper methods.
    '''
    _writes = copy.copy(writes)

    def tester(self):
        for w in _writes:
            getattr(self, w)()
        self._verifyZip()
        pass
    # unit tests get confused if the method name isn't test...
    tester.__name__ = name
    return tester
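

# Illustrative sketch, not part of the original file: the two factories
# combine like this. setattr() under a name starting with 'test' is what
# makes unittest discover the generated method; the '_write...' helper it
# calls is attached the same way. Names here are hypothetical.
def _attach_example():
    write = createWriter(givenlength, dict(leaf=0, length=1))
    setattr(TestExtensiveStored, '_write_example', write)
    setattr(TestExtensiveStored, 'test_example',
            createTester('test_example', '_write_example'))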
class TestExtensiveStored(unittest.TestCase):
    '''Unit tests for MozZipFile

    The test cases are actually populated by code following the class
    definition.
    '''
    stage = "mozzipfilestage"
    compression = zipfile.ZIP_STORED
    def leaf(self, *leafs):
        return os.path.join(self.stage, *leafs)

    def setUp(self):
        if os.path.exists(self.stage):
            shutil.rmtree(self.stage)
        os.mkdir(self.stage)
        self.f = self.leaf('test.jar')
        self.ref = {}
        self.seed = 0

    def tearDown(self):
        self.f = None
        self.ref = None

    def _verifyZip(self):
        zf = zipfile.ZipFile(self.f)
        badEntry = zf.testzip()
        self.failIf(badEntry, badEntry)
        zlist = zf.namelist()
        zlist.sort()
        vlist = self.ref.keys()
        vlist.sort()
        self.assertEqual(zlist, vlist)
        for leaf, content in self.ref.iteritems():
            zcontent = zf.read(leaf)
            self.assertEqual(content, zcontent)

    def _write(self, zf, seed=None, leaf=0, length=0):
        if seed is None:
            seed = self.seed
            self.seed += 1
        random.seed(seed)
        leaf = leafs[leaf]
        content = getContent(length)
        self.ref[leaf] = content
        zf.writestr(leaf, content)
        dir = os.path.dirname(self.leaf('stage', leaf))
        if not os.path.isdir(dir):
            os.makedirs(dir)
        open(self.leaf('stage', leaf), 'w').write(content)
# all leafs in all lengths
atomics = list(prod(xrange(len(leafs)), xrange(lengths)))

# populate TestExtensiveStored with testcases
for w in xrange(writes):
    # Don't iterate over all files for the first n passes,
    # those are redundant as long as w < lengths.
    # There are symmetries in the trailing end, too, but I don't know
    # how to reduce those out right now.
    nonatomics = [list(prod(range(min(i, len(leafs))), xrange(lengths)))
                  for i in xrange(1, w+1)] + [atomics]
    for descs in prod(*nonatomics):
        suffix = getid(descs)
        dicts = [dict(leaf=leaf, length=length) for leaf, length in descs]
        setattr(TestExtensiveStored, '_write' + suffix,
                createWriter(givenlength, *dicts))
        setattr(TestExtensiveStored, 'test' + suffix,
                createTester('test' + suffix, '_write' + suffix))
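

# Illustrative sketch, not part of the original file: with 3 leafs and 3
# lengths, the w == 0 pass contributes only the atomic single-write
# tests, i.e. methods test00, test01, ..., test22. Each digit pair is
# (leaf index, length index); the method writes that one entry and then
# verifies the whole jar.
def _generated_names_example():
    return ['test' + getid([desc]) for desc in atomics]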
# now create another round of tests, with two writing passes
# first, write all file combinations into the jar, close it,
@@ -182,20 +189,22 @@ for w in xrange(writes):
# This should catch more or less all artifacts generated
# by the final ordering step when closing the jar.
files = [list(prod([i], xrange(lengths))) for i in xrange(len(leafs))]
allfiles = reduce(lambda l, r: l+r,
                  [list(prod(*files[:(i+1)])) for i in xrange(len(leafs))])
for first in allfiles:
    testbasename = 'test{0}_'.format(getid(first))
    test = [None, '_write' + getid(first), None]
    for second in atomics:
        test[0] = testbasename + getid([second])
        test[2] = '_write' + getid([second])
        setattr(TestExtensiveStored, test[0], createTester(*test))
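

# Illustrative sketch, not part of the original file: each two-pass test
# pairs one multi-file first write with one atomic second write. For
# example, the hypothetical name built below, 'test0112_20', denotes a
# test that writes leafs 0 and 1, reopens the jar, overwrites leaf 2, and
# then verifies the result, exercising MozZipFile's append/overwrite path
# rather than a fresh archive.
def _two_pass_example():
    return 'test{0}_{1}'.format(getid([(0, 1), (1, 2)]), getid([(2, 0)]))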
class TestExtensiveDeflated(TestExtensiveStored):
    'Test all that has been tested with ZIP_STORED with DEFLATED, too.'
    compression = zipfile.ZIP_DEFLATED
if __name__ == '__main__':
    unittest.main()