Bug 725500 - Eliminate various jstest freezes on Unix; r=dmandelin

This is a rewrite of the jstest loop for Unix to prevent a freeze
that is possible here. See the writeup in the bug for details.

--HG--
extra : rebase_source : 8a7c80cb7ee044d909e29c669648cfc67e096c3b
This commit is contained in:
Terrence Cole 2012-02-14 11:24:48 -08:00
parent ee9d9444cf
commit e5a53642c9
6 changed files with 504 additions and 286 deletions

View File

@ -2,12 +2,21 @@
# Test harness for JSTests, controlled by manifest files.
import datetime, os, sys, subprocess
from subprocess import *
import datetime, os, sys
from subprocess import list2cmdline
from tests import TestResult, NullTestOutput
from workers import Source
from results import NullTestOutput
from tests import TestCase
from tasks_win import Source
from progressbar import ProgressBar
from results import ResultsSink
if (sys.platform.startswith('linux') or
sys.platform.startswith('darwin')
):
from tasks_unix import run_all_tests
else:
from tasks_win import run_all_tests
def exclude_tests(test_list, exclude_files):
exclude_paths = []
@ -19,168 +28,6 @@ def exclude_tests(test_list, exclude_files):
exclude_paths.append(line)
return [ _ for _ in test_list if _.path not in exclude_paths ]
def check_manifest(test_list):
    """
    Walk the current directory tree and report any .js files that are not
    listed in any manifest (per-directory harness support files are ignored).
    Prints the result; returns nothing.
    """
    test_set = set([ _.path for _ in test_list ])
    missing = []
    for dirpath, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            # Files at the tree root are harness files, not tests.
            if dirpath == '.': continue
            if not filename.endswith('.js'): continue
            # Per-directory support files are never listed in manifests.
            if filename in ('browser.js', 'shell.js', 'jsref.js', 'template.js'): continue
            path = os.path.join(dirpath, filename)
            # Normalize away the leading './' so paths match manifest form.
            if path.startswith('./'):
                path = path[2:]
            if path not in test_set:
                missing.append(path)
    if missing:
        print "Test files not contained in any manifest:"
        for path in missing:
            print path
    else:
        print 'All test files are listed in manifests'
def print_tinderbox_result(label, path, message=None, skip=False, time=None):
    """
    Print one tinderbox-formatted result line for the test at `path`.

    `label` is the tinderbox status string (e.g. TEST-PASS); the optional
    `message`, `skip`, and `time` arguments append extra annotation fields.
    Reads the global OPTIONS for shell_args and the timeout threshold.
    """
    result = label
    result += " | " + path
    result += " |" + OPTIONS.shell_args
    if message:
        result += " | " + message
    if skip:
        result += ' | (SKIP)'
    # `time` may be None; in Python 2, None compares less than any number,
    # so the TIMEOUT annotation is simply omitted in that case.
    if time > OPTIONS.timeout:
        result += ' | (TIMEOUT)'
    print result
class TestTask:
    """Callable wrapper that binds one test to the global run options."""

    # Shared command-line prefix used to launch the JS shell; populated once
    # via set_js_cmd_prefix() before any task is invoked.
    js_cmd_prefix = None

    def __init__(self, test):
        self.test = test

    def __call__(self):
        # Disabled tests still yield a placeholder output (unless the run
        # explicitly includes skipped tests), so result counts stay in sync.
        if self.test.enable or OPTIONS.run_skipped:
            return self.test.run(self.js_cmd_prefix, OPTIONS.timeout)
        else:
            return NullTestOutput(self.test)

    def __str__(self):
        return str(self.test)

    @classmethod
    def set_js_cmd_prefix(self, js_path, js_args, debugger_prefix):
        # NOTE: decorated @classmethod, so `self` here is the class object;
        # the prefix is stored on the class and shared by all tasks.
        parts = []
        if debugger_prefix:
            parts += debugger_prefix
        parts.append(js_path)
        if js_args:
            parts += js_args
        self.js_cmd_prefix = parts
class ResultsSink:
    """
    Accumulates raw test outputs, classifies them via TestResult, tracks
    pass/fail counts, updates the progress bar, and prints the final report.
    Reads the global OPTIONS for display and tinderbox settings.
    """

    # File-like object that -s/-o output is written to; set at startup.
    output_file = None

    def __init__(self):
        self.groups = {}            # dev category label -> [test path]
        self.counts = [ 0, 0, 0 ]   # [passed, known failures, other]
        self.n = 0                  # total results seen so far
        self.finished = False       # True once the whole run completed
        self.pb = None              # optional ProgressBar

    def push(self, output):
        """Record one TestOutput (or NullTestOutput for a skipped test)."""
        if isinstance(output, NullTestOutput):
            if OPTIONS.tinderbox:
                print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
            self.counts[2] += 1
            self.n += 1
        else:
            if OPTIONS.show_cmd:
                print >> self.output_file, subprocess.list2cmdline(output.cmd)
            if OPTIONS.show_output:
                print >> self.output_file, ' rc = %d, run time = %f' % (output.rc, output.dt)
                self.output_file.write(output.out)
                self.output_file.write(output.err)
            result = TestResult.from_output(output)
            tup = (result.result, result.test.expect, result.test.random)
            dev_label = self.LABELS[tup][1]
            # Timeouts override the normal classification bucket.
            if output.timed_out:
                dev_label = 'TIMEOUTS'
            self.groups.setdefault(dev_label, []).append(result.test.path)
            self.n += 1
            if result.result == TestResult.PASS and not result.test.random:
                self.counts[0] += 1
            elif result.test.expect and not result.test.random:
                self.counts[1] += 1
            else:
                self.counts[2] += 1
            if OPTIONS.tinderbox:
                # Print each subtest result as well when there are several.
                if len(result.results) > 1:
                    for sub_ok, msg in result.results:
                        label = self.LABELS[(sub_ok, result.test.expect, result.test.random)][0]
                        if label == 'TEST-UNEXPECTED-PASS':
                            label = 'TEST-PASS (EXPECTED RANDOM)'
                        print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
                print_tinderbox_result(self.LABELS[
                    (result.result, result.test.expect, result.test.random)][0],
                    result.test.path, time=output.dt)
        if self.pb:
            self.pb.label = '[%4d|%4d|%4d]'%tuple(self.counts)
            self.pb.update(self.n)

    # Conceptually, this maps (test result x test expectation) to text labels.
    # key is (result, expect, random)
    # value is (tinderbox label, dev test category)
    LABELS = {
        (TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),

        (TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
        (TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
        (TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
        (TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),

        (TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
        (TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
        (TestResult.PASS, True, False): ('TEST-PASS', ''),
        (TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
    }

    def list(self):
        """Print grouped results and, if requested, write the failure file."""
        for label, paths in sorted(self.groups.items()):
            if label == '': continue
            print label
            for path in paths:
                print ' %s'%path
        if OPTIONS.failure_file:
            failure_file = open(OPTIONS.failure_file, 'w')
            if not self.all_passed():
                # NOTE(review): direct indexing raises KeyError when only one
                # of REGRESSIONS/TIMEOUTS is present -- .get() would be safer.
                for path in self.groups['REGRESSIONS'] + self.groups['TIMEOUTS']:
                    print >> failure_file, path
            failure_file.close()
        suffix = '' if self.finished else ' (partial run -- interrupted by user)'
        if self.all_passed():
            print 'PASS' + suffix
        else:
            print 'FAIL' + suffix

    def all_passed(self):
        """True when no unexpected failures or timeouts were recorded."""
        return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
def run_tests(tests, results):
"""Run the given tests, sending raw results to the given results accumulator."""
pb = None
@ -192,17 +39,18 @@ def run_tests(tests, results):
pass
results.pb = pb
test_list = [ TestTask(test) for test in tests ]
pipeline = Source(test_list, results, False)
results.finished = pipeline.start(OPTIONS.worker_count)
try:
results.finished = run_all_tests(tests, results, OPTIONS)
except KeyboardInterrupt:
results.finished = False
if pb:
if pb:
pb.finish()
if not OPTIONS.tinderbox:
results.list()
if __name__ == '__main__':
if __name__ == '__main__':
from optparse import OptionParser
op = OptionParser(usage='%prog JS_SHELL [TEST-SPECS]')
op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
@ -272,18 +120,18 @@ if __name__ == '__main__':
if OPTIONS.valgrind_args:
debugger_prefix.append(OPTIONS.valgrind_args)
# Running under valgrind is not very useful if we don't show results.
OPTIONS.show_output = True
OPTIONS.show_output = True
else:
debugger_prefix = []
TestTask.set_js_cmd_prefix(JS, OPTIONS.shell_args.split(), debugger_prefix)
TestCase.set_js_cmd_prefix(JS, OPTIONS.shell_args.split(), debugger_prefix)
output_file = sys.stdout
if OPTIONS.output_file and (OPTIONS.show_cmd or OPTIONS.show_output):
output_file = open(OPTIONS.output_file, 'w')
ResultsSink.output_file = output_file
if ((OPTIONS.show_cmd or OPTIONS.show_output) and
if ((OPTIONS.show_cmd or OPTIONS.show_output) and
output_file == sys.stdout or OPTIONS.tinderbox):
OPTIONS.hide_progress = True
@ -307,9 +155,10 @@ if __name__ == '__main__':
xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
xul_tester = manifest.XULInfoTester(xul_info, JS)
test_list = manifest.parse(OPTIONS.manifest, xul_tester)
skipped_list = []
if OPTIONS.check_manifest:
check_manifest(test_list)
manifest.check_manifest(test_list)
if JS is None:
sys.exit()
@ -344,35 +193,44 @@ if __name__ == '__main__':
if not OPTIONS.run_slow_tests:
test_list = [ _ for _ in test_list if not _.slow ]
if OPTIONS.debug and test_list:
if not OPTIONS.run_skipped:
skipped_list = [ _ for _ in test_list if not _.enable ]
test_list = [ _ for _ in test_list if _.enable ]
if not test_list:
print 'no tests selected'
sys.exit(1)
if OPTIONS.debug:
if len(test_list) > 1:
print('Multiple tests match command line arguments, debugger can only run one')
for tc in test_list:
print(' %s'%tc.path)
sys.exit(2)
cmd = test_list[0].get_command(TestTask.js_cmd_prefix)
cmd = test_list[0].get_command(TestCase.js_cmd_prefix)
if OPTIONS.show_cmd:
print subprocess.list2cmdline(cmd)
print list2cmdline(cmd)
manifest_dir = os.path.dirname(OPTIONS.manifest)
if manifest_dir not in ('', '.'):
os.chdir(os.path.dirname(OPTIONS.manifest))
os.chdir(manifest_dir)
call(cmd)
sys.exit()
curdir = os.getcwd()
manifest_dir = os.path.dirname(OPTIONS.manifest)
if manifest_dir not in ('', '.'):
os.chdir(manifest_dir)
results = None
if not test_list:
print 'no tests selected'
else:
curdir = os.getcwd()
manifest_dir = os.path.dirname(OPTIONS.manifest)
if manifest_dir not in ('', '.'):
os.chdir(os.path.dirname(OPTIONS.manifest))
try:
results = ResultsSink()
run_tests(test_list, results)
finally:
os.chdir(curdir)
try:
results = ResultsSink(output_file, OPTIONS)
run_tests(test_list, results)
finally:
os.chdir(curdir)
for t in skipped_list:
results.push(NullTestOutput(t))
if output_file != sys.stdout:
output_file.close()

View File

@ -55,7 +55,8 @@ class XULInfo:
break
if path == None:
print "Can't find config/autoconf.mk on a directory containing the JS shell (searched from %s)"%jsdir
print ("Can't find config/autoconf.mk on a directory containing the JS shell"
" (searched from %s)") % jsdir
sys.exit(1)
# Read the values.
@ -107,7 +108,7 @@ class NullXULInfoTester:
def parse(filename, xul_tester, reldir = ''):
ans = []
comment_re = re.compile(r'#.*')
dir = os.path.dirname(filename)
dirname = os.path.dirname(filename)
try:
f = open(filename)
@ -124,7 +125,7 @@ def parse(filename, xul_tester, reldir = ''):
elif parts[0] == 'include':
include_file = parts[1]
include_reldir = os.path.join(reldir, os.path.dirname(include_file))
ans += parse(os.path.join(dir, include_file), xul_tester, include_reldir)
ans += parse(os.path.join(dirname, include_file), xul_tester, include_reldir)
elif parts[0] == 'url-prefix':
# Doesn't apply to shell tests
pass
@ -182,7 +183,8 @@ def parse(filename, xul_tester, reldir = ''):
elif fallback_action == "random":
random = True
else:
raise Exception("Invalid precondition '%s' or fallback action '%s'" % (precondition, fallback_action))
raise Exception(("Invalid precondition '%s' or fallback " +
" action '%s'") % (precondition, fallback_action))
break
pos += 1
elif parts[pos] == 'script':
@ -204,3 +206,27 @@ def parse(filename, xul_tester, reldir = ''):
ans.append(TestCase(os.path.join(reldir, script),
enable, expect, random, slow, debugMode))
return ans
def check_manifest(test_list):
test_set = set([ _.path for _ in test_list ])
missing = []
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if dirpath == '.': continue
if not filename.endswith('.js'): continue
if filename in ('browser.js', 'shell.js', 'jsref.js', 'template.js'): continue
path = os.path.join(dirpath, filename)
if path.startswith('./'):
path = path[2:]
if path not in test_set:
missing.append(path)
if missing:
print "Test files not contained in any manifest:"
for path in missing:
print path
else:
print 'All test files are listed in manifests'

191
js/src/tests/results.py Normal file
View File

@ -0,0 +1,191 @@
import re
from subprocess import list2cmdline
class TestOutput:
    """Raw output captured from a single test invocation."""

    def __init__(self, test, cmd, out, err, rc, dt, timed_out):
        # What ran and how it was launched.
        self.test = test            # Test
        self.cmd = cmd              # str: command line of test
        # What the run produced.
        self.out = out              # str: stdout
        self.err = err              # str: stderr
        self.rc = rc                # int: return code
        self.dt = dt                # float: run time
        self.timed_out = timed_out  # bool: did the test time out
class NullTestOutput:
    """Stand-in TestOutput for a test that was skipped rather than run."""

    def __init__(self, test):
        self.test = test
        # Neutral placeholders: no command, empty output, success return
        # code, and zero elapsed time.
        self.cmd = ''
        self.out = ''
        self.err = ''
        self.rc = 0
        self.dt = 0.0
        self.timed_out = False
class TestResult:
    """Classified result from a test run."""

    PASS = 'PASS'
    FAIL = 'FAIL'
    CRASH = 'CRASH'

    def __init__(self, test, result, results):
        self.test = test        # the test that ran
        self.result = result    # overall classification (PASS/FAIL/CRASH)
        self.results = results  # (str, str) list: per-subtest (status, message)

    @classmethod
    def from_output(cls, output):
        """Classify a raw TestOutput into a TestResult."""
        test = output.test
        out, rc = output.out, output.rc

        subtests = []
        n_failed = 0
        n_passed = 0

        # Exit codes the shell is allowed to return for this test.
        allowed_rcs = []
        if test.path.endswith('-n.js'):
            allowed_rcs.append(3)

        exit_note = re.compile('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE ((?:-|\\d)+) ---')
        for line in out.split('\n'):
            if line.startswith(' FAILED!'):
                n_failed += 1
                subtests.append((cls.FAIL, line[len(' FAILED! '):]))
            elif line.startswith(' PASSED!'):
                n_passed += 1
                subtests.append((cls.PASS, line[len(' PASSED! '):]))
            else:
                m = exit_note.match(line)
                if m:
                    allowed_rcs.append(int(m.group(1)))

        if rc and rc not in allowed_rcs:
            # An unexpected nonzero exit: rc 3 is the shell's normal
            # "uncaught exception" status, anything else is a crash.
            overall = cls.FAIL if rc == 3 else cls.CRASH
        elif (rc or n_passed > 0) and n_failed == 0:
            overall = cls.PASS
        else:
            overall = cls.FAIL

        return cls(test, overall, subtests)
class ResultsSink:
def __init__(self, output_file, options):
self.output_file = output_file
self.options = options
self.groups = {}
self.counts = [ 0, 0, 0 ]
self.n = 0
self.finished = False
self.pb = None
def push(self, output):
if isinstance(output, NullTestOutput):
if self.options.tinderbox:
print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
self.counts[2] += 1
self.n += 1
else:
if self.options.show_cmd:
print >> self.output_file, list2cmdline(output.cmd)
if self.options.show_output:
print >> self.output_file, ' rc = %d, run time = %f' % (output.rc, output.dt)
self.output_file.write(output.out)
self.output_file.write(output.err)
result = TestResult.from_output(output)
tup = (result.result, result.test.expect, result.test.random)
dev_label = self.LABELS[tup][1]
if output.timed_out:
dev_label = 'TIMEOUTS'
self.groups.setdefault(dev_label, []).append(result.test.path)
self.n += 1
if result.result == TestResult.PASS and not result.test.random:
self.counts[0] += 1
elif result.test.expect and not result.test.random:
self.counts[1] += 1
else:
self.counts[2] += 1
if self.options.tinderbox:
if len(result.results) > 1:
for sub_ok, msg in result.results:
label = self.LABELS[(sub_ok, result.test.expect, result.test.random)][0]
if label == 'TEST-UNEXPECTED-PASS':
label = 'TEST-PASS (EXPECTED RANDOM)'
print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
print_tinderbox_result(self.LABELS[
(result.result, result.test.expect, result.test.random)][0],
result.test.path, time=output.dt)
if self.pb:
self.pb.label = '[%4d|%4d|%4d]'%tuple(self.counts)
self.pb.update(self.n)
# Conceptually, this maps (test result x test expection) to text labels.
# key is (result, expect, random)
# value is (tinderbox label, dev test category)
LABELS = {
(TestResult.CRASH, False, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.CRASH, False, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.CRASH, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.CRASH, True, True): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.FAIL, False, False): ('TEST-KNOWN-FAIL', ''),
(TestResult.FAIL, False, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
(TestResult.FAIL, True, False): ('TEST-UNEXPECTED-FAIL', 'REGRESSIONS'),
(TestResult.FAIL, True, True): ('TEST-KNOWN-FAIL (EXPECTED RANDOM)', ''),
(TestResult.PASS, False, False): ('TEST-UNEXPECTED-PASS', 'FIXES'),
(TestResult.PASS, False, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
(TestResult.PASS, True, False): ('TEST-PASS', ''),
(TestResult.PASS, True, True): ('TEST-PASS (EXPECTED RANDOM)', ''),
}
def list(self):
for label, paths in sorted(self.groups.items()):
if label == '': continue
print label
for path in paths:
print ' %s'%path
if self.options.failure_file:
failure_file = open(self.options.failure_file, 'w')
if not self.all_passed():
for path in self.groups['REGRESSIONS'] + self.groups['TIMEOUTS']:
print >> failure_file, path
failure_file.close()
suffix = '' if self.finished else ' (partial run -- interrupted by user)'
if self.all_passed():
print 'PASS' + suffix
else:
print 'FAIL' + suffix
def all_passed(self):
return 'REGRESSIONS' not in self.groups and 'TIMEOUTS' not in self.groups
def print_tinderbox_result(self, label, path, message=None, skip=False, time=None):
result = label
result += " | " + path
result += " |" + self.options.shell_args
if message:
result += " | " + message
if skip:
result += ' | (SKIP)'
if time > self.options.timeout:
result += ' | (TIMEOUT)'
print result

202
js/src/tests/tasks_unix.py Normal file
View File

@ -0,0 +1,202 @@
# A unix-oriented process dispatcher. Uses a single thread with select and
# waitpid to dispatch tasks. This avoids several deadlocks that are possible
# with fork/exec + threads + Python.
import errno, os, sys, select
from datetime import datetime, timedelta
from results import TestOutput
PROGRESS_BAR_GRANULARITY = 0.1 #sec
class Task(object):
    """Book-keeping for one spawned child test process."""

    def __init__(self, test, pid, stdout, stderr):
        self.test = test        # the test being executed
        # Full command line, recorded for reporting purposes.
        self.cmd = test.get_command(test.js_cmd_prefix)
        self.pid = pid          # child process id
        self.stdout = stdout    # read end of the child's stdout pipe
        self.stderr = stderr    # read end of the child's stderr pipe
        self.start = datetime.now()  # wall-clock start, used for timeouts
        self.out = []           # accumulated stdout fragments
        self.err = []           # accumulated stderr fragments
def spawn_test(test):
    """Spawn one child, return a task struct."""
    # One pipe pair per stream so the parent can capture stdout and stderr
    # separately.
    (rout, wout) = os.pipe()
    (rerr, werr) = os.pipe()
    rv = os.fork()

    # Parent: keep only the read ends and track the child.
    if rv:
        os.close(wout)
        os.close(werr)
        return Task(test, rv, rout, rerr)

    # Child: keep only the write ends and wire them to stdout/stderr.
    os.close(rout)
    os.close(rerr)
    os.dup2(wout, 1)
    os.dup2(werr, 2)
    cmd = test.get_command(test.js_cmd_prefix)
    # Replaces the child's image; on success this never returns.
    # NOTE(review): if execvp() raises (e.g. shell binary missing), the
    # exception propagates in the *child*, which then keeps running the
    # harness code -- an os._exit() guard here would be safer; confirm.
    os.execvp(cmd[0], cmd)
def get_max_wait(tasks, results, timeout):
    """
    Return how long (in seconds) the caller may block in select() before a
    running task could exceed `timeout`, or None to wait forever.

    The test harness uses a timeout of 0 to mean tests never time out; for
    select(), that translates to a wait of None (block indefinitely).
    """
    if not timeout:
        wait = None
    else:
        now = datetime.now()
        # BUG FIX: wake at the *earliest* deadline among the running tasks.
        # The previous version took the maximum remaining time, which let
        # tasks with earlier deadlines overrun their timeout, and collapsed
        # to 0 -> "wait forever" once every task was past its deadline --
        # leaving hung, silent children unkilled.
        remaining = [timedelta(seconds=timeout) - (now - task.start)
                     for task in tasks]
        shortest = min(remaining) if remaining else timedelta(seconds=timeout)
        # A task already past its deadline: poll almost immediately rather
        # than returning 0, which callers would translate into "forever".
        wait = max(shortest.total_seconds(), 0.001)

    # If we have a progress-meter, we need to wake up frequently to update it.
    if results.pb is not None:
        if wait is None or wait > PROGRESS_BAR_GRANULARITY:
            wait = PROGRESS_BAR_GRANULARITY
    return wait
def flush_input(fd, frags):
    """
    Drain any immediately-available data from file descriptor `fd`,
    appending each chunk read to the list `frags`.
    """
    chunk = os.read(fd, 4096)
    frags.append(chunk)
    # A short read means the descriptor is (momentarily) empty. A full
    # 4096-byte read may mean exactly one buffer's worth of data or that
    # more is queued, so poll before reading again to guarantee we never
    # block indefinitely on an idle descriptor.
    while len(chunk) == 4096:
        ready, _, _ = select.select([fd], [], [], 0)
        if not ready:
            break
        chunk = os.read(fd, 4096)
        frags.append(chunk)
def read_input(tasks, timeout):
    """
    Block for at most `timeout` seconds (None = forever) waiting for output
    from any of the given tasks, draining whatever becomes readable.
    """
    watch = []
    sink_for = {}  # fd -> fragment list to append into
    for task in tasks:
        watch.append(task.stdout)
        watch.append(task.stderr)
        sink_for[task.stdout] = task.out
        sink_for[task.stderr] = task.err

    # Also watch each stdout for exceptional conditions: the descriptor
    # reports a close event when the child dies, waking us immediately so
    # cores do not sit idle until the timeout expires.
    exceptional = [task.stdout for task in tasks]

    readable, _, _ = select.select(watch, [], exceptional, timeout)
    for fd in readable:
        flush_input(fd, sink_for[fd])
def remove_task(tasks, pid):
    """
    Remove the task whose child process id is `pid` from `tasks` (which is
    modified in place) and return it. Raises KeyError if no task matches.
    """
    for position, task in enumerate(tasks):
        if task.pid == pid:
            return tasks.pop(position)
    raise KeyError("No such pid: %s" % pid)
def timed_out(task, timeout):
    """
    Report whether `task` has been running for longer than `timeout`
    seconds. A falsy `timeout` means tasks never time out, so the answer
    is always False.
    """
    if not timeout:
        return False
    elapsed = datetime.now() - task.start
    return elapsed > timedelta(seconds=timeout)
def reap_zombies(tasks, results, timeout):
    """
    Search for children of this process that have finished. If they are tasks,
    then this routine will clean up the child and send a TestOutput to the
    results channel. This method returns a new task list that has had the ended
    tasks removed.
    """
    while True:
        try:
            # Non-blocking wait for any child of this process.
            pid, status = os.waitpid(0, os.WNOHANG)
            if pid == 0:
                # Children exist, but none have exited yet.
                break
        except OSError, e:
            if e.errno == errno.ECHILD:
                # No children remain at all.
                break
            # NOTE(review): a bare `raise` would preserve the original
            # traceback; `raise e` resets it.
            raise e

        ended = remove_task(tasks, pid)
        # Capture any final output still buffered in the pipes before
        # closing them.
        flush_input(ended.stdout, ended.out)
        flush_input(ended.stderr, ended.err)
        os.close(ended.stdout)
        os.close(ended.stderr)
        out = TestOutput(
            ended.test,
            ended.cmd,
            ''.join(ended.out),
            ''.join(ended.err),
            os.WEXITSTATUS(status),
            (datetime.now() - ended.start).total_seconds(),
            timed_out(ended, timeout))
        results.push(out)
    return tasks
def kill_undead(tasks, results, timeout):
    """
    Signal all children that are over the given timeout.

    Returns True when at least one child was signalled, so the caller knows
    an immediate reap pass (with timeout classification) is needed. The
    previous version returned None, which made the guarded reap in
    run_all_tests dead code and let killed children be recorded without
    their timed-out flag.
    """
    killed = False
    for task in tasks:
        if timed_out(task, timeout):
            # SIGKILL: a hung shell may be ignoring gentler signals.
            os.kill(task.pid, 9)
            killed = True
    return killed
def run_all_tests(tests, results, options):
    """
    Dispatch every test, keeping at most options.worker_count children in
    flight, and feed each completed TestOutput into `results`. Returns True
    when the full run completes.
    """
    # Copy and reverse for fast pop off end.
    tests = tests[:]
    tests.reverse()

    # The set of currently running tests.
    tasks = []

    while len(tests) or len(tasks):
        # Top up the pool of running children.
        while len(tests) and len(tasks) < options.worker_count:
            tasks.append(spawn_test(tests.pop()))

        # Sleep only as long as the timeout/progress-bar bookkeeping allows,
        # waking early on any child output or death.
        timeout = get_max_wait(tasks, results, options.timeout)
        read_input(tasks, timeout)

        # We attempt to reap once before forcibly killing timed out tasks so
        # that anything that died during our sleep is not marked as timed out
        # in the test results.
        tasks = reap_zombies(tasks, results, False)

        # If any children were killed for exceeding the timeout, reap them
        # right away so they are classified with the timeout applied.
        if kill_undead(tasks, results, options.timeout):
            tasks = reap_zombies(tasks, results, options.timeout)

        if results.pb:
            results.pb.update(results.n)
    return True

View File

@ -6,19 +6,20 @@ from Queue import Queue, Empty
from datetime import datetime
class Source:
def __init__(self, task_list, results, verbose = False):
def __init__(self, task_list, results, timeout, verbose = False):
self.tasks = Queue()
for task in task_list:
self.tasks.put_nowait(task)
self.results = results
self.timeout = timeout
self.verbose = verbose
def start(self, worker_count):
t0 = datetime.now()
sink = Sink(self.results)
self.workers = [ Worker(_+1, self.tasks, sink, self.verbose) for _ in range(worker_count) ]
self.workers = [ Worker(_+1, self.tasks, sink, self.timeout, self.verbose) for _ in range(worker_count) ]
if self.verbose: print '[P] Starting workers.'
for w in self.workers:
w.t0 = t0
@ -34,7 +35,7 @@ class Source:
def join_workers(self):
try:
for w in self.workers:
w.thread.join(20000)
w.join(20000)
return True
except KeyboardInterrupt:
for w in self.workers:
@ -53,28 +54,25 @@ class Sink:
finally:
self.lock.release()
class Worker(object):
def __init__(self, id, tasks, sink, verbose):
class Worker(Thread):
def __init__(self, id, tasks, sink, timeout, verbose):
Thread.__init__(self)
self.setDaemon(True)
self.id = id
self.tasks = tasks
self.sink = sink
self.timeout = timeout
self.verbose = verbose
self.thread = None
self.stop = False
def log(self, msg):
dd = datetime.now() - self.t0
dt = dd.seconds + 1e-6 * dd.microseconds
if self.verbose:
dd = datetime.now() - self.t0
dt = dd.seconds + 1e-6 * dd.microseconds
print '[W%d %.3f] %s' % (self.id, dt, msg)
def start(self):
self.thread = Thread(target=self.run)
self.thread.setDaemon(True)
self.thread.start()
def run(self):
try:
while True:
@ -83,9 +81,14 @@ class Worker(object):
self.log('Get next task.')
task = self.tasks.get(False)
self.log('Start task %s.'%str(task))
result = task()
result = task.run(task.js_cmd_prefix, self.timeout)
self.log('Finished task.')
self.sink.push(result)
self.log('Pushed result.')
except Empty:
pass
def run_all_tests(tests, results, options):
    """
    Threaded counterpart to tasks_unix.run_all_tests: push every test
    through a Source/Worker pipeline and return its completion status
    (presumably True on a finished run -- see Source.start).
    """
    source = Source(tests, results, options.timeout, False)
    return source.start(options.worker_count)

View File

@ -7,6 +7,8 @@ import datetime, os, re, sys, time
from subprocess import *
from threading import *
from results import TestOutput
def do_run_cmd(cmd):
l = [ None, None ]
th_run_cmd(cmd, l)
@ -57,7 +59,7 @@ def run_cmd(cmd, timeout=60.0):
os.kill(l[0].pid, signal.SIGKILL)
time.sleep(.1)
timed_out = True
except OSError:
except OSError, e:
# Expecting a "No such process" error
pass
th.join()
@ -77,8 +79,8 @@ class Test(object):
return Test.prefix_command(head) + [ '-f', os.path.join(path, 'shell.js') ]
def get_command(self, js_cmd_prefix):
dir, filename = os.path.split(self.path)
cmd = js_cmd_prefix + Test.prefix_command(dir)
dirname, filename = os.path.split(self.path)
cmd = js_cmd_prefix + Test.prefix_command(dirname)
if self.debugMode:
cmd += [ '-d' ]
# There is a test that requires the path to start with './'.
@ -92,6 +94,7 @@ class Test(object):
class TestCase(Test):
"""A test case consisting of a test and an expected result."""
js_cmd_prefix = None
def __init__(self, path, enable, expect, random, slow, debugMode):
Test.__init__(self, path)
@ -115,77 +118,12 @@ class TestCase(Test):
ans += ', debugMode'
return ans
class TestOutput:
    """Raw output captured from a single test invocation."""

    def __init__(self, test, cmd, out, err, rc, dt, timed_out):
        # What ran and how it was launched.
        self.test = test            # Test
        self.cmd = cmd              # str: command line of test
        # What the run produced.
        self.out = out              # str: stdout
        self.err = err              # str: stderr
        self.rc = rc                # int: return code
        self.dt = dt                # float: run time
        self.timed_out = timed_out  # bool: did the test time out
class NullTestOutput:
    """Stand-in TestOutput for a test that was skipped rather than run."""

    def __init__(self, test):
        self.test = test
        # Neutral placeholders: no command, empty output, success return
        # code, and zero elapsed time.
        self.cmd = ''
        self.out = ''
        self.err = ''
        self.rc = 0
        self.dt = 0.0
        self.timed_out = False
class TestResult:
    """Classified result from a test run."""

    PASS = 'PASS'
    FAIL = 'FAIL'
    CRASH = 'CRASH'

    def __init__(self, test, result, results):
        self.test = test        # the test that ran
        self.result = result    # overall classification (PASS/FAIL/CRASH)
        self.results = results  # (str, str) list: per-subtest (status, message)

    @classmethod
    def from_output(cls, output):
        """Classify a raw TestOutput into a TestResult."""
        test = output.test
        out, rc = output.out, output.rc

        subtests = []
        n_failed = 0
        n_passed = 0

        # Exit codes the shell is allowed to return for this test.
        allowed_rcs = []
        if test.path.endswith('-n.js'):
            allowed_rcs.append(3)

        exit_note = re.compile('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE ((?:-|\\d)+) ---')
        for line in out.split('\n'):
            if line.startswith(' FAILED!'):
                n_failed += 1
                subtests.append((cls.FAIL, line[len(' FAILED! '):]))
            elif line.startswith(' PASSED!'):
                n_passed += 1
                subtests.append((cls.PASS, line[len(' PASSED! '):]))
            else:
                m = exit_note.match(line)
                if m:
                    allowed_rcs.append(int(m.group(1)))

        if rc and rc not in allowed_rcs:
            # An unexpected nonzero exit: rc 3 is the shell's normal
            # "uncaught exception" status, anything else is a crash.
            overall = cls.FAIL if rc == 3 else cls.CRASH
        elif (rc or n_passed > 0) and n_failed == 0:
            overall = cls.PASS
        else:
            overall = cls.FAIL

        return cls(test, overall, subtests)
def set_js_cmd_prefix(self, js_path, js_args, debugger_prefix):
    """
    Assemble and cache the command-line prefix used to launch the JS shell:
    [debugger prefix...] + js_path + [shell args...].

    NOTE(review): in the pre-patch TestTask this was decorated @classmethod
    (so `self` is the class object and the prefix is shared by all tests);
    the decorator is outside this view -- confirm it carried over.
    """
    parts = []
    if debugger_prefix:
        parts += debugger_prefix
    parts.append(js_path)
    if js_args:
        parts += js_args
    self.js_cmd_prefix = parts