Bug 1417267 - Output structured logs for jstests and jit-tests in automation, r=jonco

--HG--
extra : rebase_source : e428c4f582b6e6b50de3709fd2e1a0106ca23262
extra : source : a83b373d2d2ec45343f1612929881e00c099843e
Steve Fink 2017-11-14 15:36:47 -08:00
parent 270cf4bba9
commit a1edd33ebf
5 changed files with 107 additions and 15 deletions

js/src/jit-test/jit_test.py View File

@@ -367,7 +367,7 @@ def main(argv):
     try:
         ok = None
         if options.remote:
-            ok = jittests.run_tests_remote(job_list, job_count, prefix, options)
+            ok = jittests.run_tests(job_list, job_count, prefix, options, remote=True)
         else:
             with change_env(test_environment):
                 ok = jittests.run_tests(job_list, job_count, prefix, options)

js/src/tests/jstests.py View File

@@ -377,7 +377,7 @@ def main():
         return 0

     with changedir(test_dir), change_env(test_environment):
-        results = ResultsSink(options, test_count)
+        results = ResultsSink('jstests', options, test_count)
        try:
            for out in run_all_tests(test_gen, prefix, results.pb, options):
                results.push(out)

js/src/tests/lib/jittests.py View File

@@ -19,6 +19,7 @@ else:
 from progressbar import ProgressBar, NullProgressBar
 from results import TestOutput
+from structuredlog import TestLogger

 TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
 JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
@@ -467,7 +468,7 @@ def check_output(out, err, rc, timed_out, test, options):
     return True

-def print_automation_format(ok, res):
+def print_automation_format(ok, res, slog):
     # Output test failures in a parsable format suitable for automation, eg:
     # TEST-RESULT | filename.js | Failure description (code N, args "--foobar")
     #
@@ -487,6 +488,14 @@ def print_automation_format(ok, res):
     print("{} | {} | {} (code {}, args \"{}\") [{:.1f} s]".format(
         result, res.test.relpath_top, message, res.rc, jitflags, res.dt))

+    details = {
+        'message': message,
+        'extra': {
+            'jitflags': jitflags,
+        }
+    }
+    slog.test(res.test.relpath_tests, 'PASS' if ok else 'FAIL', res.dt, **details)
+
     # For failed tests, print as much information as we have, to aid debugging.
     if ok:
         return
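For reference, the slog.test() call above is expanded by TestLogger (the new structuredlog.py, last file below) into a back-dated test_start record followed by a test_end record, one JSON object per line on stdout. A sketch of the output for a hypothetical failing jit-test; pid, timestamps, test path, and message are made up, and json.dumps key order may vary:

    {"source": "jittests", "thread": "main", "pid": 12345, "action": "test_start", "test": "basic/testExample.js", "time": 1510700000.0}
    {"source": "jittests", "thread": "main", "pid": 12345, "action": "test_end", "test": "basic/testExample.js", "time": 1510700001.5, "status": "FAIL", "message": "Timeout", "extra": {"jitflags": "--ion-eager"}}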
@@ -561,7 +570,7 @@ def create_progressbar(num_tests, options):
         return ProgressBar(num_tests, fmt)
     return NullProgressBar()

-def process_test_results(results, num_tests, pb, options):
+def process_test_results(results, num_tests, pb, options, slog):
     failures = []
     timeouts = 0
     complete = False
@@ -610,7 +619,7 @@ def process_test_results(results, num_tests, pb, options):
                 pb.message("FAIL - {}".format(res.test.relpath_tests))

         if options.format == 'automation':
-            print_automation_format(ok, res)
+            print_automation_format(ok, res, slog)

         n = i + 1
         pb.update(n, {
@@ -627,7 +636,23 @@ def process_test_results(results, num_tests, pb, options):
     pb.finish(True)
     return print_test_summary(num_tests, failures, complete, doing, options)

-def run_tests(tests, num_tests, prefix, options):
+def run_tests(tests, num_tests, prefix, options, remote=False):
+    slog = None
+    if options.format == 'automation':
+        slog = TestLogger("jittests")
+        slog.suite_start()
+
+    if remote:
+        ok = run_tests_remote(tests, num_tests, prefix, options, slog)
+    else:
+        ok = run_tests_local(tests, num_tests, prefix, options, slog)
+
+    if slog:
+        slog.suite_end()
+
+    return ok
+
+def run_tests_local(tests, num_tests, prefix, options, slog):
     # The jstests tasks runner requires the following options. The names are
     # taken from the jstests options processing code, which are frequently
     # subtly different from the options jit-tests expects. As such, we wrap
@@ -643,7 +668,7 @@ def run_tests(tests, num_tests, prefix, options):
     pb = create_progressbar(num_tests, options)
     gen = run_all_tests(tests, prefix, pb, shim_options)
-    ok = process_test_results(gen, num_tests, pb, options)
+    ok = process_test_results(gen, num_tests, pb, options, slog)

     return ok

 def get_remote_results(tests, device, prefix, options):
@@ -676,7 +701,7 @@ def push_progs(options, device, progs):
                                    os.path.basename(local_file))
         device.pushFile(local_file, remote_file)

-def run_tests_remote(tests, num_tests, prefix, options):
+def run_tests_remote(tests, num_tests, prefix, options, slog):
     # Setup device with everything needed to run our tests.
     from mozdevice import devicemanagerADB
@@ -717,7 +742,7 @@ def run_tests_remote(tests, num_tests, prefix, options):
     # Run all tests.
     pb = create_progressbar(num_tests, options)
     gen = get_remote_results(tests, dm, prefix, options)
-    ok = process_test_results(gen, num_tests, pb, options)
+    ok = process_test_results(gen, num_tests, pb, options, slog)

     return ok

 def platform_might_be_android():

js/src/tests/lib/results.py View File

@@ -1,8 +1,10 @@
 from __future__ import print_function

-import re
-
-from progressbar import NullProgressBar, ProgressBar
+import pipes
+import re
+
+from progressbar import NullProgressBar, ProgressBar
+from structuredlog import TestLogger

 # subprocess.list2cmdline does not properly escape for sh-like shells
 def escape_cmdline(args):
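For context, escape_cmdline is what consumes the new pipes import: it quotes each argument so a command line can be pasted into an sh-like shell, which subprocess.list2cmdline does not do. Its body is elided from this hunk; a plausible sketch, not the verbatim source:

    def escape_cmdline(args):
        # pipes.quote shell-quotes each argument individually
        return ' '.join(pipes.quote(str(arg)) for arg in args)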
@@ -106,9 +108,12 @@ class TestDuration:
         self.duration = duration

 class ResultsSink:
-    def __init__(self, options, testcount):
+    def __init__(self, testsuite, options, testcount):
         self.options = options
         self.fp = options.output_fp
+        if self.options.format == 'automation':
+            self.slog = TestLogger(testsuite)
+            self.slog.suite_start()

         self.groups = {}
         self.output_dict = {}
@@ -207,8 +212,7 @@ class ResultsSink:
                         label, result.test, time=output.dt,
                         message=msg)
                 tup = (result.result, result.test.expect, result.test.random)
-                self.print_automation_result(
-                    self.LABELS[tup][0], result.test, time=output.dt)
+                self.print_automation_result(self.LABELS[tup][0], result.test, time=output.dt)
                 return

         if dev_label:
@ -221,7 +225,9 @@ class ResultsSink:
def finish(self, completed):
self.pb.finish(completed)
if not self.options.format == 'automation':
if self.options.format == 'automation':
self.slog.suite_end()
else:
self.list(completed)
# Conceptually, this maps (test result x test expection) to text labels.
@@ -302,3 +308,13 @@ class ResultsSink:
             result += ' | (TIMEOUT)'
         result += ' [{:.1f} s]'.format(time)
         print(result)
+
+        details = { 'extra': {} }
+        if self.options.shell_args:
+            details['extra']['shell_args'] = self.options.shell_args
+        details['extra']['jitflags'] = test.jitflags
+        if message:
+            details['message'] = message
+
+        status = 'FAIL' if 'TEST-UNEXPECTED' in label else 'PASS'
+        self.slog.test(test.path, status, time or 0, **details)
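The status mapping above reports any TEST-UNEXPECTED-* label as FAIL and everything else as PASS, and `time or 0` guards against a missing duration. A hypothetical test_end record for an unexpected jstests failure (pid, timestamps, path, and option values are made up):

    {"source": "jstests", "thread": "main", "pid": 12345, "action": "test_end", "test": "ecma_6/Math/example.js", "time": 1510700002.3, "status": "FAIL", "extra": {"shell_args": "--no-asmjs", "jitflags": []}}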

js/src/tests/lib/structuredlog.py View File

@@ -0,0 +1,51 @@
+# produce mozlog-compatible log messages, following the spec at https://mozbase.readthedocs.io/en/latest/mozlog.html
+
+import json
+import os
+
+from time import time
+
+class TestLogger(object):
+    def __init__(self, source, threadname='main'):
+        self.template = {
+            'source': source,
+            'thread': threadname,
+            'pid': os.getpid(),
+        }
+
+    def _record(self, **kwargs):
+        record = self.template.copy()
+        record.update(**kwargs)
+        if 'time' not in record:
+            record['time'] = time()
+        return record
+
+    def _log_obj(self, obj):
+        print(json.dumps(obj))
+
+    def _log(self, **kwargs):
+        self._log_obj(self._record(**kwargs))
+
+    def suite_start(self):
+        self._log(action='suite_start', tests=[])
+
+    def suite_end(self):
+        self._log(action='suite_end')
+
+    def test_start(self, testname):
+        self._log(action='test_start', test=testname)
+
+    def test_end(self, testname, status):
+        self._log(action='test_end', test=testname, status=status)
+
+    def test(self, testname, status, duration, **details):
+        record = self._record(action='test_start', test=testname)
+        end_time = record['time']
+        record['time'] -= duration
+        self._log_obj(record)
+
+        record['action'] = 'test_end'
+        record['time'] = end_time
+        record['status'] = status
+        record.update(**details)
+        self._log_obj(record)
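A note on TestLogger.test(): since the harness only learns a test's outcome after the test finishes, this method emits the test_start/test_end pair retroactively, back-dating the test_start timestamp by `duration` seconds so consumers still see a plausible timeline. Note that the `details` (message, extra) are attached only to the test_end record. A minimal usage sketch; test name, pid, and timestamps are illustrative, and json.dumps key order may differ:

    log = TestLogger('jittests')
    log.suite_start()
    log.test('basic/testExample.js', 'PASS', 1.5, message='Success')
    log.suite_end()

    # stdout, one mozlog JSON object per line:
    # {"source": "jittests", "thread": "main", "pid": 4242, "action": "suite_start", "tests": [], "time": 1510700000.0}
    # {"source": "jittests", "thread": "main", "pid": 4242, "action": "test_start", "test": "basic/testExample.js", "time": 1510700008.5}
    # {"source": "jittests", "thread": "main", "pid": 4242, "action": "test_end", "test": "basic/testExample.js", "time": 1510700010.0, "status": "PASS", "message": "Success"}
    # {"source": "jittests", "thread": "main", "pid": 4242, "action": "suite_end", "time": 1510700010.0}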