diff --git a/taskcluster/ci/test/browsertime-desktop.yml b/taskcluster/ci/test/browsertime-desktop.yml
index f35927cdd12e..0ef1bb390cde 100644
--- a/taskcluster/ci/test/browsertime-desktop.yml
+++ b/taskcluster/ci/test/browsertime-desktop.yml
@@ -363,3 +363,27 @@ browsertime-tp6-profiling:
             fission: 2
             default: 3
     treeherder-symbol: Btime-Prof(tp6)
+
+browsertime-custom:
+    apps: [firefox]
+    description: Raptor (browsertime) custom pageload tests
+    run-visual-metrics: true
+    max-run-time: 2700
+    mozharness:
+        extra-options:
+            - --chimera
+    raptor-subtests: [[process-switch, ps]]
+    run-on-projects:
+        by-variant:
+            fission:
+                by-test-platform:
+                    windows.*-32.*: []
+                    default: [mozilla-central]
+            default:
+                by-test-platform:
+                    linux.*shippable.*: [mozilla-central]
+                    macos.*shippable-qr.*: [mozilla-central]
+                    windows10-64.*shippable.*: [mozilla-central]
+                    default: []
+    tier: 2
+    treeherder-symbol: Btime(ps)
diff --git a/taskcluster/ci/test/test-sets.yml b/taskcluster/ci/test/test-sets.yml
index 66a70f07fac4..e5ae7e6fb11e 100644
--- a/taskcluster/ci/test/test-sets.yml
+++ b/taskcluster/ci/test/test-sets.yml
@@ -170,6 +170,7 @@ browsertime:
     - browsertime-benchmark
     - browsertime-benchmark-wasm
     - browsertime-tp6-profiling
+    - browsertime-custom

 browsertime-high-priority:
     - browsertime-benchmark
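The nested `by-variant` / `by-test-platform` mapping above is taskgraph keyed-by configuration: keys are matched against the task's attributes (regex-style, with `default` as the fallback) to pick a project list. A simplified sketch of how that resolution behaves — an illustrative analogue, not taskgraph's actual `resolve_keyed_by`, which handles more cases:

```python
import re

# Illustrative analogue of taskgraph keyed-by resolution; regex matching
# and fallback order are simplified relative to the real helper.
def resolve_keyed_by(value, **context):
    while isinstance(value, dict) and len(value) == 1:
        key = next(iter(value))
        if not key.startswith("by-"):
            break
        alternatives = value[key]
        # e.g. "by-test-platform" looks up context["test_platform"]
        lookup = context.get(key[len("by-"):].replace("-", "_"), "")
        for alt, alt_value in alternatives.items():
            if alt != "default" and re.match(alt, lookup):
                value = alt_value
                break
        else:
            value = alternatives.get("default")
    return value

run_on = {"by-variant": {
    "fission": {"by-test-platform": {
        "windows.*-32.*": [],
        "default": ["mozilla-central"]}},
    "default": {"by-test-platform": {
        "linux.*shippable.*": ["mozilla-central"],
        "macos.*shippable-qr.*": ["mozilla-central"],
        "windows10-64.*shippable.*": ["mozilla-central"],
        "default": []}},
}}

# 32-bit Windows fission runs nowhere; Linux shippable runs on mozilla-central.
assert resolve_keyed_by(run_on, variant="fission",
                        test_platform="windows7-32-shippable/opt") == []
assert resolve_keyed_by(run_on, variant="1proc",
                        test_platform="linux1804-64-shippable/opt") == ["mozilla-central"]
```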
diff --git a/taskcluster/docker/visual-metrics/run-visual-metrics.py b/taskcluster/docker/visual-metrics/run-visual-metrics.py
index 698d6623763d..2593a0877ef3 100755
--- a/taskcluster/docker/visual-metrics/run-visual-metrics.py
+++ b/taskcluster/docker/visual-metrics/run-visual-metrics.py
@@ -46,6 +46,9 @@ class Job:
     #: The extra options for this job.
     extra_options = attr.ib(type=str)

+    #: If true, we allow 0's in the vismet results
+    accept_zero_vismet = attr.ib(type=bool)
+
     #: json_path: The path to the ``browsertime.json`` file on disk.
     json_path = attr.ib(type=Path)

@@ -61,6 +64,7 @@ JOB_SCHEMA = Schema(
                 Required("test_name"): str,
                 Required("browsertime_json_path"): str,
                 Required("extra_options"): [str],
+                Required("accept_zero_vismet"): bool,
             }
         ],
         Required("application"): {Required("name"): str, "version": str},
@@ -185,7 +189,7 @@ def append_result(log, suites, test_name, name, result, extra_options):
             "replicates": [result],
             "lowerIsBetter": True,
             "unit": "ms",
-            "shouldAlert": SHOULD_ALERT[name],
+            "shouldAlert": SHOULD_ALERT.get(name, False),
         }
     else:
         subtests[name]["replicates"].append(result)
@@ -322,6 +326,7 @@ def main(log, args):
                 extra_options=len(job["extra_options"]) > 0
                 and job["extra_options"]
                 or jobs_json["extra_options"],
+                accept_zero_vismet=job["accept_zero_vismet"],
                 json_path=browsertime_json_path,
                 video_path=browsertime_json_path.parent / video,
                 count=count,
@@ -420,18 +425,20 @@ def run_visual_metrics(job, visualmetrics_path, options):
     # Python 3.5 requires a str object (not 3.6+)
     res = json.loads(res.decode("utf8"))

-    # Ensure that none of these values are at 0 which
-    # is indicative of a failling test
-    monitored_tests = [
-        "contentfulspeedindex",
-        "lastvisualchange",
-        "perceptualspeedindex",
-        "speedindex",
-    ]
     failed_tests = []
-    for metric, val in res.items():
-        if metric.lower() in monitored_tests and val == 0:
-            failed_tests.append(metric)
+    if not job.accept_zero_vismet:
+        # Ensure that none of these values are at 0, which
+        # is indicative of a failing test
+        monitored_tests = [
+            "contentfulspeedindex",
+            "lastvisualchange",
+            "perceptualspeedindex",
+            "speedindex",
+        ]
+        for metric, val in res.items():
+            if metric.lower() in monitored_tests and val == 0:
+                failed_tests.append(metric)
+
     if failed_tests:
         log.error(
             "TEST-UNEXPECTED-FAIL | Some visual metrics have an erroneous value of 0."
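The functional change above is narrow: the zero-value sanity check still runs by default, but a job can now opt out of it. Distilled into a standalone function — `find_zero_metrics` is an illustrative name, not something defined in the script:

```python
# Sketch of the gate above, assuming `res` is the dict parsed from
# visualmetrics.py's JSON output.
MONITORED_TESTS = {
    "contentfulspeedindex",
    "lastvisualchange",
    "perceptualspeedindex",
    "speedindex",
}

def find_zero_metrics(res, accept_zero_vismet=False):
    """Return the metric names that would fail the run; a job with
    accept_zero_vismet=True never fails on zero values."""
    if accept_zero_vismet:
        return []
    return [m for m, v in res.items() if m.lower() in MONITORED_TESTS and v == 0]

# By default a zero SpeedIndex fails the task; process-switch opts in
# via accept_zero_vismet = true, so the same result passes.
assert find_zero_metrics({"SpeedIndex": 0, "LastVisualChange": 850}) == ["SpeedIndex"]
assert find_zero_metrics({"SpeedIndex": 0}, accept_zero_vismet=True) == []
```

The `SHOULD_ALERT.get(name, False)` change in `append_result` follows the same defensive pattern: a metric missing from the alert table no longer raises a `KeyError`; it simply does not alert.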
diff --git a/testing/perfdocs/generated/raptor.rst b/testing/perfdocs/generated/raptor.rst
index 879bec1ca4a7..1993ccabc992 100644
--- a/testing/perfdocs/generated/raptor.rst
+++ b/testing/perfdocs/generated/raptor.rst
@@ -16,6 +16,7 @@ Browsertime tests that use a custom pageload test script. These use the pageload
 .. dropdown:: process-switch (Measures process switch time)
    :container: + anchor-id-process-switch-c

+   * **accept zero vismet**: true
    * **alert on**: fcp, loadtime
    * **alert threshold**: 2.0
    * **apps**: firefox, chrome, chromium
diff --git a/testing/raptor/raptor/manifest.py b/testing/raptor/raptor/manifest.py
index 4f766d63e885..b2d36dec2f58 100644
--- a/testing/raptor/raptor/manifest.py
+++ b/testing/raptor/raptor/manifest.py
@@ -575,6 +575,10 @@ def get_raptor_test_list(args, oskey):
             next_test["subtest_lower_is_better"] = bool_from_str(
                 next_test.get("subtest_lower_is_better")
             )
+            if next_test.get("accept_zero_vismet", None) is not None:
+                next_test["accept_zero_vismet"] = bool_from_str(
+                    next_test.get("accept_zero_vismet")
+                )

     # write out .json test setting files for the control server to read and send to web ext
     if len(tests_to_run) != 0:
diff --git a/testing/raptor/raptor/results.py b/testing/raptor/raptor/results.py
index 368be0933680..70f07ab5ad0a 100644
--- a/testing/raptor/raptor/results.py
+++ b/testing/raptor/raptor/results.py
@@ -615,6 +615,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
         browsertime_json,
         json_name="browsertime.json",
         extra_options=[],
+        accept_zero_vismet=False,
     ):
         # The visual metrics task expects posix paths.
         def _normalized_join(*args):
@@ -628,6 +629,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
             "browsertime_json_path": _normalized_join(reldir, json_name),
             "test_name": test_name,
             "extra_options": extra_options,
+            "accept_zero_vismet": accept_zero_vismet,
         }

     def summarize_and_output(self, test_config, tests, test_names):
@@ -663,6 +665,8 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
         for test in tests:
             test_name = test["name"]

+            accept_zero_vismet = test.get("accept_zero_vismet", False)
+
             bt_res_json = os.path.join(
                 self.result_dir_for_test(test), "browsertime.json"
             )
@@ -711,6 +715,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
                         cold_path,
                         json_name="cold-browsertime.json",
                         extra_options=list(extra_options),
+                        accept_zero_vismet=accept_zero_vismet,
                     )
                 )

@@ -722,12 +727,16 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
                         warm_path,
                         json_name="warm-browsertime.json",
                         extra_options=list(extra_options),
+                        accept_zero_vismet=accept_zero_vismet,
                     )
                 )
             else:
                 video_jobs.append(
                     self._extract_vmetrics(
-                        test_name, bt_res_json, extra_options=list(extra_options)
+                        test_name,
+                        bt_res_json,
+                        extra_options=list(extra_options),
+                        accept_zero_vismet=accept_zero_vismet,
                     )
                 )

diff --git a/testing/raptor/raptor/tests/custom/browsertime-process-switch.ini b/testing/raptor/raptor/tests/custom/browsertime-process-switch.ini
index eca296f71b49..1ed1d2ff1d1f 100644
--- a/testing/raptor/raptor/tests/custom/browsertime-process-switch.ini
+++ b/testing/raptor/raptor/tests/custom/browsertime-process-switch.ini
@@ -24,6 +24,7 @@ use_live_sites = false
 [process-switch]
 test_url = https://mozilla.seanfeng.dev/files/red.html,https://mozilla.pettay.fi/moztests/blue.html
 test_script = process_switch.js
+accept_zero_vismet = true
 browsertime_args = --pageCompleteWaitTime=1000 --pageCompleteCheckInactivity=true
 playback_recordings = mitm5-linux-firefox-seanfeng.mp mitm5-linux-firefox-pettay.mp
 playback_pageset_manifest = mitm5-linux-firefox-proc-switch.manifest
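End to end, the flag travels from the `.ini` file (a string) through `manifest.py` (coerced with `bool_from_str`) into the per-video job entries that `results.py` writes out and `run-visual-metrics.py` validates before processing any video. A hedged sketch of that handoff; the `bool_from_str` below is a stand-in re-implementation (the real helper lives in raptor's utils), and the job values are illustrative:

```python
from voluptuous import Required, Schema

# Stand-in for raptor's bool_from_str; assumption: the real helper also
# accepts only "true"/"false" and rejects anything else.
def bool_from_str(value):
    if value.lower() == "true":
        return True
    if value.lower() == "false":
        return False
    raise ValueError("invalid value: %s" % value)

# manifest.py: the .ini line `accept_zero_vismet = true` arrives as a string.
next_test = {"name": "process-switch", "accept_zero_vismet": "true"}
next_test["accept_zero_vismet"] = bool_from_str(next_test["accept_zero_vismet"])

# results.py -> jobs.json: one entry per video job (illustrative values).
job = {
    "test_name": next_test["name"],
    "browsertime_json_path": "browsertime-results/process-switch/cold-browsertime.json",
    "extra_options": ["cold"],
    "accept_zero_vismet": next_test["accept_zero_vismet"],
}

# run-visual-metrics.py: each entry is schema-checked up front, so a
# missing or mistyped flag fails fast rather than mid-run.
per_job_schema = Schema({
    Required("test_name"): str,
    Required("browsertime_json_path"): str,
    Required("extra_options"): [str],
    Required("accept_zero_vismet"): bool,
})
per_job_schema(job)  # raises voluptuous.Invalid on a bad entry
```

Making `accept_zero_vismet` a `Required` schema key (rather than optional with a default) means every producer of jobs.json must be updated in the same patch, which is why `results.py` threads the keyword argument through all three `_extract_vmetrics` call sites.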