Bug 1257799 - Update to latest wptrunner, a=testonly

MozReview-Commit-ID: K7Ro7yG0Nds

commit 75640f0266
parent 09e37ebf3f
@@ -203,10 +203,6 @@ When used for expectation data, manifests have the following format:
   the (sub)test is disabled and should either not be run (for tests)
   or that its results should be ignored (subtests).

-* A key ``restart-after`` which can be set to any value to indicate that
-  the runner should restart the browser after running this test (e.g. to
-  clear out unwanted state).
-
 * Variables ``debug``, ``os``, ``version``, ``processor`` and
   ``bits`` that describe the configuration of the browser under
   test. ``debug`` is a boolean indicating whether a build is a debug
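For reference, an expectation manifest entry using the ``disabled`` key described above looks roughly like this (a hypothetical example for illustration only; the test name and bug label are placeholders, and the condition uses the ``debug`` variable the docs define):

    [test.html]
      disabled:
        if debug: bug 1234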
@@ -82,6 +82,7 @@ def update_properties():

 class FirefoxBrowser(Browser):
     used_ports = set()
+    init_timeout = 60

     def __init__(self, logger, binary, prefs_root, debug_info=None,
                  symbols_path=None, stackwalk_binary=None, certutil_binary=None,
@@ -206,11 +206,12 @@ class RefTestImplementation(object):
     def logger(self):
         return self.executor.logger

-    def get_hash(self, test):
+    def get_hash(self, test, viewport_size, dpi):
         timeout = test.timeout * self.timeout_multiplier
+        key = (test.url, viewport_size, dpi)

-        if test.url not in self.screenshot_cache:
-            success, data = self.executor.screenshot(test)
+        if key not in self.screenshot_cache:
+            success, data = self.executor.screenshot(test, viewport_size, dpi)

             if not success:
                 return False, data
@@ -218,14 +219,14 @@ class RefTestImplementation(object):
             screenshot = data
             hash_value = hashlib.sha1(screenshot).hexdigest()

-            self.screenshot_cache[test.url] = (hash_value, None)
+            self.screenshot_cache[key] = (hash_value, None)

-            rv = True, (hash_value, screenshot)
+            rv = (hash_value, screenshot)
         else:
-            rv = True, self.screenshot_cache[test.url]
+            rv = self.screenshot_cache[key]

-        self.message.append("%s %s" % (test.url, rv[1][0]))
-        return rv
+        self.message.append("%s %s" % (test.url, rv[0]))
+        return True, rv

     def is_pass(self, lhs_hash, rhs_hash, relation):
         assert relation in ("==", "!=")
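The point of this change is that the screenshot cache is now keyed on (url, viewport_size, dpi) rather than the URL alone, so the same page rendered under different viewport or DPI settings no longer collides in the cache. A minimal standalone sketch of the idea (take_screenshot is a hypothetical stand-in for the executor call):

    import hashlib

    screenshot_cache = {}

    def get_hash(url, viewport_size, dpi, take_screenshot):
        # The cache key includes the rendering parameters, so the same URL
        # rendered at a different size or DPI gets its own entry.
        key = (url, viewport_size, dpi)
        if key not in screenshot_cache:
            screenshot = take_screenshot(url, viewport_size, dpi)
            screenshot_cache[key] = hashlib.sha1(screenshot).hexdigest()
        return screenshot_cache[key]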
@@ -234,6 +235,8 @@ class RefTestImplementation(object):
                 (relation == "!=" and lhs_hash != rhs_hash))

     def run_test(self, test):
+        viewport_size = test.viewport_size
+        dpi = test.dpi
         self.message = []

         # Depth-first search of reference tree, with the goal
@@ -247,7 +250,7 @@ class RefTestImplementation(object):
             nodes, relation = stack.pop()

             for i, node in enumerate(nodes):
-                success, data = self.get_hash(node)
+                success, data = self.get_hash(node, viewport_size, dpi)
                 if success is False:
                     return {"status": data[0], "message": data[1]}

@@ -264,7 +267,7 @@ class RefTestImplementation(object):

         for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
             if screenshot is None:
-                success, screenshot = self.retake_screenshot(node)
+                success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
                 if success:
                     screenshots[i] = screenshot

@@ -275,13 +278,14 @@ class RefTestImplementation(object):
                 "message": "\n".join(self.message),
                 "extra": {"reftest_screenshots": log_data}}

-    def retake_screenshot(self, node):
-        success, data = self.executor.screenshot(node)
+    def retake_screenshot(self, node, viewport_size, dpi):
+        success, data = self.executor.screenshot(node, viewport_size, dpi)
         if not success:
             return False, data

-        hash_val, _ = self.screenshot_cache[node.url]
-        self.screenshot_cache[node.url] = hash_val, data
+        key = (node.url, viewport_size, dpi)
+        hash_val, _ = self.screenshot_cache[key]
+        self.screenshot_cache[key] = hash_val, data
         return True, data


 class Protocol(object):
@@ -107,12 +107,6 @@ class MarionetteProtocol(Protocol):
         return True

     def after_connect(self):
-        # Turn off debug-level logging by default since this is so verbose
-        with self.marionette.using_context("chrome"):
-            self.marionette.execute_script("""
-                Components.utils.import("resource://gre/modules/Log.jsm");
-                Log.repository.getLogger("Marionette").level = Log.Level.Info;
-                """)
         self.load_runner("http")

     def load_runner(self, protocol):
@@ -385,7 +379,11 @@ class MarionetteRefTestExecutor(RefTestExecutor):

         return self.convert_result(test, result)

-    def screenshot(self, test):
+    def screenshot(self, test, viewport_size, dpi):
+        # https://github.com/w3c/wptrunner/issues/166
+        assert viewport_size is None
+        assert dpi is None

         timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None

         test_url = self.test_url(test)
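Of the executors touched here, only Servo actually honors viewport_size and dpi; the Marionette, Selenium, and Servo-WebDriver executors assert that both are None, failing fast if a test requests a rendering configuration they cannot provide (the linked wptrunner issue tracks lifting this). A standalone sketch of that guard pattern:

    def screenshot(test, viewport_size=None, dpi=None):
        # This executor cannot change the viewport or DPI, so reject any
        # test that asks for them instead of silently producing a wrong
        # rendering (see https://github.com/w3c/wptrunner/issues/166).
        assert viewport_size is None
        assert dpi is None
        return b"...screenshot bytes..."  # placeholder for the real capture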
@@ -247,7 +247,11 @@ class SeleniumRefTestExecutor(RefTestExecutor):

         return self.convert_result(test, result)

-    def screenshot(self, test):
+    def screenshot(self, test, viewport_size, dpi):
+        # https://github.com/w3c/wptrunner/issues/166
+        assert viewport_size is None
+        assert dpi is None
+
         return SeleniumRun(self._screenshot,
                            self.protocol.webdriver,
                            self.test_url(test),
@@ -196,14 +196,14 @@ class ServoRefTestExecutor(ProcessTestExecutor):
             os.rmdir(self.tempdir)
         ProcessTestExecutor.teardown(self)

-    def screenshot(self, test):
+    def screenshot(self, test, viewport_size, dpi):
         full_url = self.test_url(test)

         with TempFilename(self.tempdir) as output_path:
             debug_args, command = browser_command(
                 self.binary,
                 [render_arg(self.browser.render_backend), "--hard-fail", "--exit",
-                 "-u", "Servo/wptrunner", "-Z", "disable-text-aa",
+                 "-u", "Servo/wptrunner", "-Z", "disable-text-aa,load-webfonts-synchronously",
                  "--output=%s" % output_path, full_url],
                 self.debug_info)

@@ -213,6 +213,12 @@ class ServoRefTestExecutor(ProcessTestExecutor):
             for pref in test.environment.get('prefs', {}):
                 command += ["--pref", pref]

+            if viewport_size:
+                command += ["--resolution", viewport_size]
+
+            if dpi:
+                command += ["--device-pixel-ratio", dpi]
+
             self.command = debug_args + command

             env = os.environ.copy()
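So for a reftest that declares a viewport and DPI, the assembled Servo command list would come out roughly like the following (an illustrative sketch with placeholder binary, output path, and URL; the render-backend flag from render_arg is omitted):

    command = ["servo", "--hard-fail", "--exit",
               "-u", "Servo/wptrunner",
               "-Z", "disable-text-aa,load-webfonts-synchronously",
               "--output=/tmp/screenshot.png",
               "http://web-platform.test:8000/example/test.html",
               "--resolution", "800x600",
               "--device-pixel-ratio", "2"]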
@@ -226,7 +226,11 @@ class ServoWebDriverRefTestExecutor(RefTestExecutor):
             message += traceback.format_exc(e)
             return test.result_cls("ERROR", message), []

-    def screenshot(self, test):
+    def screenshot(self, test, viewport_size, dpi):
+        # https://github.com/w3c/wptrunner/issues/166
+        assert viewport_size is None
+        assert dpi is None
+
         timeout = (test.timeout * self.timeout_multiplier + extra_timeout
                    if self.debug_info is None else None)

@@ -29,10 +29,10 @@ def data_cls_getter(output_node, visited_node):
     raise ValueError


-def bool_prop(name, node):
-    """Boolean property"""
+def disabled(node):
+    """Boolean indicating whether the test is disabled"""
     try:
-        return node.get(name)
+        return node.get("disabled")
     except KeyError:
         return None

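With ``restart-after`` removed, the generic bool_prop helper has only one caller left, so it is specialized back into disabled(). A standalone sketch of the behavior (StubNode is a hypothetical stand-in for a manifest node):

    def disabled(node):
        """Return the node's "disabled" value, or None if the key is unset."""
        try:
            return node.get("disabled")
        except KeyError:
            return None

    class StubNode(object):
        def __init__(self, data):
            self._data = data

        def get(self, key):
            if key not in self._data:
                raise KeyError(key)
            return self._data[key]

    print(disabled(StubNode({"disabled": "bug 1234"})))  # "bug 1234"
    print(disabled(StubNode({})))                        # None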
@@ -109,11 +109,7 @@ class ExpectedManifest(ManifestItem):

     @property
     def disabled(self):
-        return bool_prop("disabled", self)
-
-    @property
-    def restart_after(self):
-        return bool_prop("restart-after", self)
+        return disabled(self)

     @property
     def tags(self):
@@ -127,11 +123,7 @@ class ExpectedManifest(ManifestItem):
 class DirectoryManifest(ManifestItem):
     @property
     def disabled(self):
-        return bool_prop("disabled", self)
-
-    @property
-    def restart_after(self):
-        return bool_prop("restart-after", self)
+        return disabled(self)

     @property
     def tags(self):
@@ -172,11 +164,7 @@ class TestNode(ManifestItem):

     @property
     def disabled(self):
-        return bool_prop("disabled", self)
-
-    @property
-    def restart_after(self):
-        return bool_prop("restart-after", self)
+        return disabled(self)

     @property
     def tags(self):
@@ -524,8 +524,7 @@ class TestRunnerManager(threading.Thread):

         self.test = None

-        restart_before_next = (test.restart_after or
-                               file_result.status in ("CRASH", "EXTERNAL-TIMEOUT") or
+        restart_before_next = (file_result.status in ("CRASH", "EXTERNAL-TIMEOUT") or
                                subtest_unexpected or is_unexpected)

         if (self.pause_after_test or
@@ -72,6 +72,8 @@ def create_parser(product_choices=None):
                         help="Multiplier relative to standard test timeout to use")
     parser.add_argument("--repeat", action="store", type=int, default=1,
                         help="Number of times to run the tests")
+    parser.add_argument("--repeat-until-unexpected", action="store_true", default=None,
+                        help="Run tests in a loop until one returns an unexpected result")

     parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                         help="Don't capture stdio and write to logging")
@@ -126,6 +128,9 @@ def create_parser(product_choices=None):
     debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
                                  help="Path to stackwalker program used to analyse minidumps.")

+    debugging_group.add_argument("--pdb", action="store_true",
+                                 help="Drop into pdb on python exception")
+
     chunking_group = parser.add_argument_group("Test Chunking")
     chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
                                 help="Total number of chunks to use")
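Taken together, the two new flags would be used along these lines (hypothetical invocations; the runner entry point and test path are placeholders):

    # Keep re-running the suite until something returns an unexpected result:
    wptrunner --product firefox --repeat-until-unexpected testharness/example

    # Drop into a pdb post-mortem if the runner itself raises:
    wptrunner --product firefox --pdb testharness/example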
@@ -102,6 +102,8 @@ def list_disabled(test_paths, product, **kwargs):
 def get_pause_after_test(test_loader, **kwargs):
     total_tests = sum(len(item) for item in test_loader.tests.itervalues())
     if kwargs["pause_after_test"] is None:
+        if kwargs["repeat_until_unexpected"]:
+            return False
         if kwargs["repeat"] == 1 and total_tests == 1:
             return True
         return False
@@ -160,10 +162,15 @@ def run_tests(config, test_paths, product, **kwargs):
         browser_kwargs = get_browser_kwargs(ssl_env=ssl_env, **kwargs)

         repeat = kwargs["repeat"]
-        for repeat_count in xrange(repeat):
-            if repeat > 1:
-                logger.info("Repetition %i / %i" % (repeat_count + 1, repeat))
+        repeat_count = 0
+        repeat_until_unexpected = kwargs["repeat_until_unexpected"]
+
+        while repeat_count < repeat or repeat_until_unexpected:
+            repeat_count += 1
+            if repeat_until_unexpected:
+                logger.info("Repetition %i" % (repeat_count))
+            elif repeat > 1:
+                logger.info("Repetition %i / %i" % (repeat_count, repeat))

             unexpected_count = 0
             logger.suite_start(test_loader.test_ids, run_info)
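The bounded for-loop becomes a while-loop so one code path serves both a fixed repeat count and an unbounded repeat-until-unexpected run. The control flow in isolation (a simplified sketch; run_suite is a hypothetical stand-in that returns the number of unexpected results in one pass):

    def repeat_runs(repeat, repeat_until_unexpected, run_suite):
        repeat_count = 0
        unexpected_total = 0
        # With repeat_until_unexpected set, the loop condition never goes
        # false on its own; only the break below can end it.
        while repeat_count < repeat or repeat_until_unexpected:
            repeat_count += 1
            unexpected_total += run_suite()
            if repeat_until_unexpected and unexpected_total > 0:
                break
        return unexpected_total == 0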
@@ -208,6 +215,8 @@ def run_tests(config, test_paths, product, **kwargs):

             unexpected_total += unexpected_count
             logger.info("Got %i unexpected results" % unexpected_count)
+            if repeat_until_unexpected and unexpected_total > 0:
+                break
             logger.suite_end()

     return unexpected_total == 0
@@ -215,9 +224,9 @@ def run_tests(config, test_paths, product, **kwargs):

 def main():
     """Main entry point when calling from the command line"""
-    try:
-        kwargs = wptcommandline.parse_args()
+    kwargs = wptcommandline.parse_args()

+    try:
         if kwargs["prefs_root"] is None:
             kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))

@@ -230,6 +239,9 @@ def main():
         else:
             return not run_tests(**kwargs)
     except Exception:
-        import pdb, traceback
-        print traceback.format_exc()
-        pdb.post_mortem()
+        if kwargs["pdb"]:
+            import pdb, traceback
+            print traceback.format_exc()
+            pdb.post_mortem()
+        else:
+            raise
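Previously every unhandled exception dropped the runner into a pdb post-mortem; now that only happens when --pdb was passed, and otherwise the exception propagates normally (full traceback, non-zero exit). Note that argument parsing was hoisted out of the try block above precisely so kwargs["pdb"] is available in this handler. The pattern in isolation (a sketch; run is a hypothetical entry point, and the print statement is Python 2 as in the source):

    def main(kwargs):
        try:
            return run(**kwargs)
        except Exception:
            if kwargs["pdb"]:
                # Opt-in interactive debugging of the failure.
                import pdb, traceback
                print traceback.format_exc()
                pdb.post_mortem()
            else:
                # Default: let the exception propagate.
                raise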
@@ -149,14 +149,6 @@ class Test(object):
                 return disabled
         return None

-    @property
-    def restart_after(self):
-        for meta in self.itermeta(None):
-            restart_after = meta.restart_after
-            if restart_after is not None:
-                return True
-        return False
-
     @property
     def tags(self):
         tags = set()
@@ -222,7 +214,9 @@ class ReftestTest(Test):
     result_cls = ReftestResult
     test_type = "reftest"

-    def __init__(self, url, inherit_metadata, test_metadata, references, timeout=DEFAULT_TIMEOUT, path=None, protocol="http"):
+    def __init__(self, url, inherit_metadata, test_metadata, references,
+                 timeout=DEFAULT_TIMEOUT, path=None, viewport_size=None,
+                 dpi=None, protocol="http"):
         Test.__init__(self, url, inherit_metadata, test_metadata, timeout, path, protocol)

         for _, ref_type in references:
@@ -230,6 +224,8 @@ class ReftestTest(Test):
                 raise ValueError

         self.references = references
+        self.viewport_size = viewport_size
+        self.dpi = dpi

     @classmethod
     def from_manifest(cls,
@@ -254,6 +250,8 @@ class ReftestTest(Test):
                           [],
                           timeout=timeout,
                           path=manifest_test.path,
+                          viewport_size=manifest_test.viewport_size,
+                          dpi=manifest_test.dpi,
                           protocol="https" if hasattr(manifest_test, "https") and manifest_test.https else "http")

         nodes[url] = node