[lit] Factor out report generators into separate file
Factor out the report generators from main.py into reports.py. I verified that we generate the exact same output by running `check-all` and comparing the new and old output for both report flavors.
commit 8c252224cc (parent 63e5e7b26a)
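For orientation, after this change both report flavors in main.py reduce to constructing a report object and calling write_results on it. A minimal sketch of the new call pattern, where tests, elapsed, json_path, and xunit_path are placeholders for values main.py already has in scope:

    # Sketch only: the call pattern introduced by this commit (see the main.py
    # hunk below). json_path and xunit_path are hypothetical output locations.
    import lit.reports

    lit.reports.JsonReport(json_path).write_results(tests, elapsed)
    lit.reports.XunitReport(xunit_path).write_results(tests, 0.0)  # elapsed currently unused here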
utils/lit/lit/Test.py
@@ -368,45 +368,3 @@ class Test:
         parallelism or where it is desirable to surface their failures early.
         """
         return self.suite.config.is_early
-
-    def writeJUnitXML(self, fil):
-        """Write the test's report xml representation to a file handle."""
-        test_name = quoteattr(self.path_in_suite[-1])
-        test_path = self.path_in_suite[:-1]
-        safe_test_path = [x.replace(".","_") for x in test_path]
-        safe_name = self.suite.name.replace(".","-")
-
-        if safe_test_path:
-            class_name = safe_name + "." + "/".join(safe_test_path)
-        else:
-            class_name = safe_name + "." + safe_name
-        class_name = quoteattr(class_name)
-        testcase_template = '<testcase classname={class_name} name={test_name} time="{time:.2f}"'
-        elapsed_time = self.result.elapsed if self.result.elapsed is not None else 0.0
-        testcase_xml = testcase_template.format(class_name=class_name, test_name=test_name, time=elapsed_time)
-        fil.write(testcase_xml)
-        if self.isFailure():
-            fil.write(">\n\t<failure ><![CDATA[")
-            # In Python2, 'str' and 'unicode' are distinct types, but in Python3, the type 'unicode' does not exist
-            # and instead 'bytes' is distinct
-            # in Python3, there's no unicode
-            if isinstance(self.result.output, str):
-                encoded_output = self.result.output
-            elif isinstance(self.result.output, bytes):
-                encoded_output = self.result.output.decode("utf-8", 'ignore')
-            else:
-                encoded_output = self.result.output.encode("utf-8", 'ignore')
-            # In the unlikely case that the output contains the CDATA terminator
-            # we wrap it by creating a new CDATA block
-            fil.write(encoded_output.replace("]]>", "]]]]><![CDATA[>"))
-            fil.write("]]></failure>\n</testcase>")
-        elif self.result.code == UNSUPPORTED:
-            unsupported_features = self.getMissingRequiredFeatures()
-            if unsupported_features:
-                skip_message = "Skipping because of: " + ", ".join(unsupported_features)
-            else:
-                skip_message = "Skipping because of configuration."
-
-            fil.write(">\n\t<skipped message={} />\n</testcase>\n".format(quoteattr(skip_message)))
-        else:
-            fil.write("/>")
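The failure branch above protects the CDATA section by splitting any embedded ']]>' terminator across two adjacent CDATA blocks. A small standalone check of that trick (hypothetical payload, standard library only), showing the escaped text still parses and round-trips:

    # Sketch: verify the ']]>' wrapping used by the failure path keeps the XML
    # well-formed and preserves the original text. The payload is made up.
    from xml.dom import minidom

    payload = 'FAIL: output that happens to contain "]]>" in it'
    escaped = payload.replace("]]>", "]]]]><![CDATA[>")
    doc = minidom.parseString("<failure><![CDATA[" + escaped + "]]></failure>")
    roundtrip = "".join(node.data for node in doc.documentElement.childNodes)
    assert roundtrip == payload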
utils/lit/lit/main.py
@@ -333,95 +333,13 @@ def print_summary(tests_by_code, quiet, elapsed):
 
 
 def write_test_results(tests, lit_config, elapsed, output_path):
     # TODO(yln): audit: unexecuted tests
-    # Construct the data we will write.
-    data = {}
-    # Encode the current lit version as a schema version.
-    data['__version__'] = lit.__versioninfo__
-    data['elapsed'] = elapsed
-    # FIXME: Record some information on the lit configuration used?
-    # FIXME: Record information from the individual test suites?
+    import lit.reports
+    r = lit.reports.JsonReport(output_path)
+    r.write_results(tests, elapsed)
-
-    # Encode the tests.
-    data['tests'] = tests_data = []
-    for test in tests:
-        test_data = {
-            'name' : test.getFullName(),
-            'code' : test.result.code.name,
-            'output' : test.result.output,
-            'elapsed' : test.result.elapsed }
-
-        # Add test metrics, if present.
-        if test.result.metrics:
-            test_data['metrics'] = metrics_data = {}
-            for key, value in test.result.metrics.items():
-                metrics_data[key] = value.todata()
-
-        # Report micro-tests separately, if present
-        if test.result.microResults:
-            for key, micro_test in test.result.microResults.items():
-                # Expand parent test name with micro test name
-                parent_name = test.getFullName()
-                micro_full_name = parent_name + ':' + key
-
-                micro_test_data = {
-                    'name' : micro_full_name,
-                    'code' : micro_test.code.name,
-                    'output' : micro_test.output,
-                    'elapsed' : micro_test.elapsed }
-                if micro_test.metrics:
-                    micro_test_data['metrics'] = micro_metrics_data = {}
-                    for key, value in micro_test.metrics.items():
-                        micro_metrics_data[key] = value.todata()
-
-                tests_data.append(micro_test_data)
-
-        tests_data.append(test_data)
-
-    # Write the output.
-    f = open(output_path, 'w')
-    try:
-        import json
-        json.dump(data, f, indent=2, sort_keys=True)
-        f.write('\n')
-    finally:
-        f.close()
 
 def write_test_results_xunit(tests, opts):
     # TODO(yln): audit: unexecuted tests
-    from xml.sax.saxutils import quoteattr
-    # Collect the tests, indexed by test suite
-    by_suite = {}
-    for result_test in tests:
-        suite = result_test.suite.config.name
-        if suite not in by_suite:
-            by_suite[suite] = {
-                'passes' : 0,
-                'failures' : 0,
-                'skipped': 0,
-                'tests' : [] }
-        by_suite[suite]['tests'].append(result_test)
-        if result_test.isFailure():
-            by_suite[suite]['failures'] += 1
-        elif result_test.result.code == lit.Test.UNSUPPORTED:
-            by_suite[suite]['skipped'] += 1
-        else:
-            by_suite[suite]['passes'] += 1
-    xunit_output_file = open(opts.xunit_output_file, "w")
-    xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
-    xunit_output_file.write("<testsuites>\n")
-    for suite_name, suite in by_suite.items():
-        safe_suite_name = quoteattr(suite_name.replace(".", "-"))
-        xunit_output_file.write("<testsuite name=" + safe_suite_name)
-        xunit_output_file.write(" tests=\"" + str(suite['passes'] +
-            suite['failures'] + suite['skipped']) + "\"")
-        xunit_output_file.write(" failures=\"" + str(suite['failures']) + "\"")
-        xunit_output_file.write(" skipped=\"" + str(suite['skipped']) +
-            "\">\n")
+    import lit.reports
+    r = lit.reports.XunitReport(opts.xunit_output_file)
+    r.write_results(tests, 0.0)
-
-        for result_test in suite['tests']:
-            result_test.writeJUnitXML(xunit_output_file)
-            xunit_output_file.write("\n")
-        xunit_output_file.write("</testsuite>\n")
-    xunit_output_file.write("</testsuites>")
-    xunit_output_file.close()
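The JsonReport call above writes the same JSON schema the removed code produced: top-level '__version__', 'elapsed', and 'tests' keys, each test entry carrying 'name', 'code', 'output', 'elapsed', and optionally 'metrics'. A hedged sketch of consuming such a file (the results path and the choice to count only the FAIL code are assumptions for illustration):

    # Sketch only: read the JSON report emitted by JsonReport and summarize it.
    # 'results.json' is a made-up path; FAIL is just one of lit's failure codes.
    import json

    with open('results.json') as f:
        data = json.load(f)

    failing = [t['name'] for t in data['tests'] if t['code'] == 'FAIL']
    print('lit schema', data['__version__'], '-', len(failing), 'FAILs in',
          round(data['elapsed'], 1), 'seconds')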
utils/lit/lit/reports.py (new executable file, 128 lines)
@@ -0,0 +1,128 @@
+import itertools
+import json
+
+from xml.sax.saxutils import quoteattr as quo
+
+import lit.Test
+
+
+class JsonReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+
+    def write_results(self, tests, elapsed):
+        assert not any(t.result.code in {lit.Test.EXCLUDED, lit.Test.SKIPPED} for t in tests)
+        # Construct the data we will write.
+        data = {}
+        # Encode the current lit version as a schema version.
+        data['__version__'] = lit.__versioninfo__
+        data['elapsed'] = elapsed
+        # FIXME: Record some information on the lit configuration used?
+        # FIXME: Record information from the individual test suites?
+
+        # Encode the tests.
+        data['tests'] = tests_data = []
+        for test in tests:
+            test_data = {
+                'name': test.getFullName(),
+                'code': test.result.code.name,
+                'output': test.result.output,
+                'elapsed': test.result.elapsed}
+
+            # Add test metrics, if present.
+            if test.result.metrics:
+                test_data['metrics'] = metrics_data = {}
+                for key, value in test.result.metrics.items():
+                    metrics_data[key] = value.todata()
+
+            # Report micro-tests separately, if present
+            if test.result.microResults:
+                for key, micro_test in test.result.microResults.items():
+                    # Expand parent test name with micro test name
+                    parent_name = test.getFullName()
+                    micro_full_name = parent_name + ':' + key
+
+                    micro_test_data = {
+                        'name': micro_full_name,
+                        'code': micro_test.code.name,
+                        'output': micro_test.output,
+                        'elapsed': micro_test.elapsed}
+                    if micro_test.metrics:
+                        micro_test_data['metrics'] = micro_metrics_data = {}
+                        for key, value in micro_test.metrics.items():
+                            micro_metrics_data[key] = value.todata()
+
+                    tests_data.append(micro_test_data)
+
+            tests_data.append(test_data)
+
+        with open(self.output_file, 'w') as file:
+            json.dump(data, file, indent=2, sort_keys=True)
+            file.write('\n')
+
+
+class XunitReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+        self.skipped_codes = {lit.Test.EXCLUDED,
+                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
+
+    # TODO(yln): elapsed unused, put it somewhere?
+    def write_results(self, tests, elapsed):
+        assert not any(t.result.code in {lit.Test.EXCLUDED, lit.Test.SKIPPED} for t in tests)
+        # Suite names are not necessarily unique. Include object identity in
+        # sort key to avoid mixing tests of different suites.
+        tests.sort(key=lambda t: (t.suite.name, id(t.suite), t.path_in_suite))
+        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
+
+        with open(self.output_file, 'w') as file:
+            file.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
+            file.write('<testsuites>\n')
+            for suite, test_iter in tests_by_suite:
+                self._write_testsuite(file, suite, list(test_iter))
+            file.write('</testsuites>\n')
+
+    def _write_testsuite(self, file, suite, tests):
+        skipped = sum(1 for t in tests if t.result.code in self.skipped_codes)
+        failures = sum(1 for t in tests if t.isFailure())
+
+        name = suite.config.name.replace('.', '-')
+        file.write(f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n')
+        for test in tests:
+            self._write_test(file, test, name)
+        file.write('</testsuite>\n')
+
+    def _write_test(self, file, test, suite_name):
+        path = '/'.join(test.path_in_suite[:-1]).replace('.', '_')
+        class_name = f'{suite_name}.{path or suite_name}'
+        name = test.path_in_suite[-1]
+        time = test.result.elapsed or 0.0
+        file.write(f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"')
+
+        if test.isFailure():
+            file.write('>\n\t<failure ><![CDATA[')
+            # In the unlikely case that the output contains the CDATA
+            # terminator we wrap it by creating a new CDATA block.
+            output = test.result.output.replace(']]>', ']]]]><![CDATA[>')
+            if isinstance(output, bytes):
+                output.decode("utf-8", 'ignore')
+            file.write(output)
+            file.write(']]></failure>\n</testcase>\n')
+        elif test.result.code in self.skipped_codes:
+            reason = self._get_skip_reason(test)
+            file.write(f'>\n\t<skipped message={quo(reason)} />\n</testcase>\n\n')
+        else:
+            file.write('/>\n')
+
+    def _get_skip_reason(self, test):
+        code = test.result.code
+        if code == lit.Test.EXCLUDED:
+            return 'Test not selected (--filter, --max-tests, --run-shard)'
+        if code == lit.Test.SKIPPED:
+            return 'User interrupt'
+
+        assert code == lit.Test.UNSUPPORTED
+        features = test.getMissingRequiredFeatures()
+        if features:
+            return 'Skipping because of: ' + ', '.join(features)
+        return 'Skipping because of configuration.'
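XunitReport.write_results groups tests into <testsuite> elements with itertools.groupby, which only merges adjacent equal keys; that is why the tests are sorted first, and why id(t.suite) is part of the sort key so that distinct suites sharing a name are not interleaved. A tiny illustration of the adjacency requirement, with made-up suite names:

    # Sketch: groupby merges runs of equal keys, so unsorted input would split
    # one suite into several <testsuite> elements. Names here are invented.
    import itertools

    suites = ['B', 'A', 'B']
    print([k for k, _ in itertools.groupby(suites)])          # ['B', 'A', 'B']
    print([k for k, _ in itertools.groupby(sorted(suites))])  # ['A', 'B']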