Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-10-26 03:35:33 +00:00
Bug 1567570 [wpt PR 17944] - Update chromium log formatter to treat subtest fails as test failure., a=testonly
Automatic update from web-platform-tests

Update chromium log formatter to treat subtest fails as test failure. (#17944)

* Update chromium log formatter to handle subtest fails as test failure.

If a test has subtest failures, the runner reports the test status as a
success, but for Chromium this is generally considered a failure. So we keep
track of which tests have subtest failures and flip their status to FAIL if
necessary.
--
wpt-commits: 8faf1fb3d5cfe08ed15eb2acd8aa1460edf1c3ee
wpt-pr: 17944
This commit is contained in:
parent f0048007d4
commit d0de03b111
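The mechanism itself is small: remember which tests had a non-passing subtest, then downgrade a harness-level PASS when the test ends. A minimal sketch of that decision in isolation (the function name and shape are illustrative only, not the formatter's actual API):

```python
def chromium_status(harness_status, subtest_statuses):
    """Reduce a harness status plus its subtest statuses to a
    Chromium-style status: a PASS is downgraded to FAIL if any
    subtest did not pass."""
    if harness_status == "PASS" and any(s != "PASS" for s in subtest_statuses):
        return "FAIL"
    return harness_status

# The harness ran the test to completion (PASS), but a subtest timed
# out, so Chromium should see the test as failed.
assert chromium_status("PASS", ["PASS", "TIMEOUT"]) == "FAIL"
# A clean run stays a PASS.
assert chromium_status("PASS", ["PASS", "PASS"]) == "PASS"
# A non-PASS harness status is reported unchanged.
assert chromium_status("CRASH", ["PASS"]) == "CRASH"
```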
```diff
@@ -28,11 +28,15 @@ class ChromiumFormatter(base.BaseFormatter):
         # the subtest messages for this test.
         self.messages = defaultdict(str)
+
+        # List of tests that have failing subtests.
+        self.tests_with_subtest_fails = set()
+
     def _append_test_message(self, test, subtest, status, message):
         """
         Appends the message data for a test.
         :param str test: the name of the test
         :param str subtest: the name of the subtest with the message
         :param str status: the subtest status
         :param str message: the string to append to the message for this test
         """
         if not message:
```
```diff
@@ -107,15 +111,25 @@ class ChromiumFormatter(base.BaseFormatter):
                          else time.time())
 
     def test_status(self, data):
+        test_name = data["test"]
+        if data["status"] != "PASS" and test_name not in self.tests_with_subtest_fails:
+            self.tests_with_subtest_fails.add(test_name)
         if "message" in data:
-            self._append_test_message(data["test"], data["subtest"],
+            self._append_test_message(test_name, data["subtest"],
                                       data["status"], data["message"])
 
     def test_end(self, data):
-        actual_status = self._map_status_name(data["status"])
         expected_status = (self._map_status_name(data["expected"])
                            if "expected" in data else "PASS")
         test_name = data["test"]
+        actual_status = self._map_status_name(data["status"])
+        if actual_status == "PASS" and test_name in self.tests_with_subtest_fails:
+            # This test passed but it has failing subtests, so we flip the status
+            # to FAIL.
+            actual_status = "FAIL"
+            # Clean up the test list to avoid accumulating too many.
+            self.tests_with_subtest_fails.remove(test_name)
+
         if "message" in data:
             self._append_test_message(test_name, None, actual_status,
                                       data["message"])
```
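mozlog delivers each subtest result (`test_status`) and the overall result (`test_end`) as separate events, which is why the hunk above carries a set between the two callbacks. A stripped-down sketch of the same pattern, assuming only plain event dicts and no mozlog dependency (the class and its return value are illustrative, not the real formatter interface):

```python
class SubtestAwareFormatter(object):
    """Stripped-down version of the pattern in the hunk above."""

    def __init__(self):
        # Names of tests with at least one non-passing subtest.
        self.tests_with_subtest_fails = set()

    def test_status(self, data):
        # Called once per subtest result.
        if data["status"] != "PASS":
            self.tests_with_subtest_fails.add(data["test"])

    def test_end(self, data):
        # Called once when the whole test finishes.
        actual_status = data["status"]
        if actual_status == "PASS" and data["test"] in self.tests_with_subtest_fails:
            # Failing subtests override the harness-level PASS.
            actual_status = "FAIL"
            # Drop the entry so the set stays small over a long run.
            self.tests_with_subtest_fails.discard(data["test"])
        return actual_status

formatter = SubtestAwareFormatter()
formatter.test_status({"test": "t1", "subtest": "a", "status": "FAIL"})
formatter.test_status({"test": "t1", "subtest": "b", "status": "PASS"})
# The harness saw the test complete normally, but the subtest failure wins.
assert formatter.test_end({"test": "t1", "status": "PASS"}) == "FAIL"
```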
```diff
@@ -162,3 +162,52 @@ def test_subtest_messages(capfd):
 
     t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
     assert t2_log == "[TIMEOUT] t2_message\n"
+
+
+def test_subtest_failure(capfd):
+    # Tests that a test fails if a subtest fails
+
+    # Set up the handler.
+    output = StringIO()
+    logger = structuredlog.StructuredLogger("test_a")
+    formatter = ChromiumFormatter()
+    logger.add_handler(handlers.StreamHandler(output, formatter))
+
+    # Run a test with some subtest failures.
+    logger.suite_start(["t1"], run_info={}, time=123)
+    logger.test_start("t1")
+    logger.test_status("t1", status="FAIL", subtest="t1_a",
+                       message="t1_a_message")
+    logger.test_status("t1", status="PASS", subtest="t1_b",
+                       message="t1_b_message")
+    logger.test_status("t1", status="TIMEOUT", subtest="t1_c",
+                       message="t1_c_message")
+
+    # Make sure the test name was added to the set of tests with subtest fails
+    assert "t1" in formatter.tests_with_subtest_fails
+
+    # The test status is reported as a pass here because the harness was able to
+    # run the test to completion.
+    logger.test_end("t1", status="PASS", expected="PASS")
+    logger.suite_end()
+
+    # check nothing got output to stdout/stderr
+    # (note that mozlog outputs exceptions during handling to stderr!)
+    captured = capfd.readouterr()
+    assert captured.out == ""
+    assert captured.err == ""
+
+    # check the actual output of the formatter
+    output.seek(0)
+    output_json = json.load(output)
+
+    test_obj = output_json["tests"]["t1"]
+    t1_log = test_obj["artifacts"]["log"]
+    assert t1_log == "[FAIL] t1_a: t1_a_message\n" \
+                     "[PASS] t1_b: t1_b_message\n" \
+                     "[TIMEOUT] t1_c: t1_c_message\n"
+    # The status of the test in the output is a failure because subtests failed,
+    # despite the harness reporting that the test passed.
+    assert test_obj["actual"] == "FAIL"
+    # Also ensure that the formatter cleaned up its internal state
+    assert "t1" not in formatter.tests_with_subtest_fails
```