Bug 1704129 - Run raptor process-switch test on mozilla-central. r=sefeng,perftest-reviewers,Bebe
This patch enables the process-switch test on mozilla-central. It also adds a new field in the raptor manifest to specify whether or not zero values in vismets can be accepted.

Differential Revision: https://phabricator.services.mozilla.com/D113221
parent d587a20ab6
commit 41727177d5
@@ -363,3 +363,27 @@ browsertime-tp6-profiling:
            fission: 2
            default: 3
    treeherder-symbol: Btime-Prof(tp6)

browsertime-custom:
    apps: [firefox]
    description: Raptor (browsertime) custom pageload tests
    run-visual-metrics: true
    max-run-time: 2700
    mozharness:
        extra-options:
            - --chimera
    raptor-subtests: [[process-switch, ps]]
    run-on-projects:
        by-variant:
            fission:
                by-test-platform:
                    windows.*-32.*: []
                    default: [mozilla-central]
            default:
                by-test-platform:
                    linux.*shippable.*: [mozilla-central]
                    macos.*shippable-qr.*: [mozilla-central]
                    windows10-64.*shippable.*: [mozilla-central]
                    default: []
    tier: 2
    treeherder-symbol: Btime(ps)

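The run-on-projects block above is a keyed-by mapping: the scheduling projects are resolved first by task variant, then by test platform, matching the platform string against each regex key and falling back to default. A rough illustration of that matching behaviour (a simplified sketch with an invented helper name and platform string, not the actual taskgraph transform):

    import re

    # Hypothetical helper, not the real taskgraph code: pick the value whose
    # regex key matches the platform, falling back to "default".
    def resolve_by_test_platform(mapping, platform):
        for pattern, value in mapping.items():
            if pattern != "default" and re.match(pattern, platform):
                return value
        return mapping.get("default", [])

    run_on = {
        "linux.*shippable.*": ["mozilla-central"],
        "macos.*shippable-qr.*": ["mozilla-central"],
        "windows10-64.*shippable.*": ["mozilla-central"],
        "default": [],
    }

    # A linux shippable platform resolves to mozilla-central only.
    print(resolve_by_test_platform(run_on, "linux64-shippable-qr/opt"))

With this mapping, only the listed shippable desktop platforms schedule the test on mozilla-central, and Windows 32-bit Fission builds are excluded.
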
@@ -170,6 +170,7 @@ browsertime:
    - browsertime-benchmark
    - browsertime-benchmark-wasm
    - browsertime-tp6-profiling
    - browsertime-custom

browsertime-high-priority:
    - browsertime-benchmark

@@ -46,6 +46,9 @@ class Job:
    #: The extra options for this job.
    extra_options = attr.ib(type=str)

    #: If true, we allow 0's in the vismet results
    accept_zero_vismet = attr.ib(type=bool)

    #: json_path: The path to the ``browsertime.json`` file on disk.
    json_path = attr.ib(type=Path)

@@ -61,6 +64,7 @@ JOB_SCHEMA = Schema(
                Required("test_name"): str,
                Required("browsertime_json_path"): str,
                Required("extra_options"): [str],
                Required("accept_zero_vismet"): bool,
            }
        ],
        Required("application"): {Required("name"): str, "version": str},

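Together with the new Job attribute, the schema change means every entry in the jobs JSON handed to the visual-metrics task must now carry accept_zero_vismet. A minimal sketch of a valid entry, trimmed to the fields shown in this hunk and using invented sample values:

    from voluptuous import Required, Schema

    # Trimmed-down version of the job schema from the hunk above.
    job_schema = Schema(
        {
            Required("test_name"): str,
            Required("browsertime_json_path"): str,
            Required("extra_options"): [str],
            Required("accept_zero_vismet"): bool,
        }
    )

    # Sample entry (values invented for illustration); validation raises
    # voluptuous.MultipleInvalid if accept_zero_vismet is missing.
    job_schema(
        {
            "test_name": "process-switch",
            "browsertime_json_path": "browsertime-results/process-switch/browsertime.json",
            "extra_options": ["fission"],
            "accept_zero_vismet": True,
        }
    )
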
@@ -185,7 +189,7 @@ def append_result(log, suites, test_name, name, result, extra_options):
            "replicates": [result],
            "lowerIsBetter": True,
            "unit": "ms",
            "shouldAlert": SHOULD_ALERT[name],
            "shouldAlert": SHOULD_ALERT.get(name, False),
        }
    else:
        subtests[name]["replicates"].append(result)

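The shouldAlert change is a small robustness fix: direct indexing raises KeyError for any subtest name that is not listed in SHOULD_ALERT, while .get() falls back to False so unknown metrics simply do not alert. A toy example (the mapping and metric name below are invented, not the real table):

    SHOULD_ALERT = {"SpeedIndex": True}  # invented subset, not the real table

    print(SHOULD_ALERT.get("SomeNewMetric", False))  # -> False, no alert
    # SHOULD_ALERT["SomeNewMetric"]                  # would raise KeyError
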
@@ -322,6 +326,7 @@ def main(log, args):
                extra_options=len(job["extra_options"]) > 0
                and job["extra_options"]
                or jobs_json["extra_options"],
                accept_zero_vismet=job["accept_zero_vismet"],
                json_path=browsertime_json_path,
                video_path=browsertime_json_path.parent / video,
                count=count,

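The extra_options expression uses the old and/or idiom: the per-job options win when that list is non-empty, otherwise the top-level options from the jobs JSON are used. It behaves like the conditional expression below (placeholder values invented); the equivalence holds because a non-empty list is always truthy.

    job = {"extra_options": ["cold"]}        # per-job options
    jobs_json = {"extra_options": ["warm"]}  # top-level fallback

    extra_options = (
        job["extra_options"]
        if len(job["extra_options"]) > 0
        else jobs_json["extra_options"]
    )
    assert extra_options == ["cold"]
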
@@ -420,18 +425,20 @@ def run_visual_metrics(job, visualmetrics_path, options):
    # Python 3.5 requires a str object (not 3.6+)
    res = json.loads(res.decode("utf8"))

    # Ensure that none of these values are at 0 which
    # is indicative of a failing test
    monitored_tests = [
        "contentfulspeedindex",
        "lastvisualchange",
        "perceptualspeedindex",
        "speedindex",
    ]
    failed_tests = []
    for metric, val in res.items():
        if metric.lower() in monitored_tests and val == 0:
            failed_tests.append(metric)
    if not job.accept_zero_vismet:
        # Ensure that none of these values are at 0 which
        # is indicative of a failing test
        monitored_tests = [
            "contentfulspeedindex",
            "lastvisualchange",
            "perceptualspeedindex",
            "speedindex",
        ]
        for metric, val in res.items():
            if metric.lower() in monitored_tests and val == 0:
                failed_tests.append(metric)

    if failed_tests:
        log.error(
            "TEST-UNEXPECTED-FAIL | Some visual metrics have an erroneous value of 0."

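The effect of the new guard: the zero-value sanity check still runs by default, but a job whose test sets accept_zero_vismet skips it, so zero values are no longer reported as failures for that test. A standalone sketch of that behaviour (the function name is invented):

    def find_zero_vismets(res, accept_zero_vismet):
        # Mirrors the guarded check above: return the monitored metrics that
        # are 0, unless the job opted in to accepting zeroes.
        if accept_zero_vismet:
            return []
        monitored_tests = [
            "contentfulspeedindex",
            "lastvisualchange",
            "perceptualspeedindex",
            "speedindex",
        ]
        return [m for m, v in res.items() if m.lower() in monitored_tests and v == 0]

    assert find_zero_vismets({"SpeedIndex": 0}, accept_zero_vismet=False) == ["SpeedIndex"]
    assert find_zero_vismets({"SpeedIndex": 0}, accept_zero_vismet=True) == []
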
@@ -16,6 +16,7 @@ Browsertime tests that use a custom pageload test script. These use the pageload
.. dropdown:: process-switch (Measures process switch time)
   :container: + anchor-id-process-switch-c

   * **accept zero vismet**: true
   * **alert on**: fcp, loadtime
   * **alert threshold**: 2.0
   * **apps**: firefox, chrome, chromium

@@ -575,6 +575,10 @@ def get_raptor_test_list(args, oskey):
            next_test["subtest_lower_is_better"] = bool_from_str(
                next_test.get("subtest_lower_is_better")
            )
        if next_test.get("accept_zero_vismet", None) is not None:
            next_test["accept_zero_vismet"] = bool_from_str(
                next_test.get("accept_zero_vismet")
            )

    # write out .json test setting files for the control server to read and send to web ext
    if len(tests_to_run) != 0:

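bool_from_str itself is not part of this diff; manifest values arrive as strings, so "true"/"false" have to be converted before the flag reaches the results handler. A minimal stand-in with the behaviour this call site relies on (a sketch, not raptor's actual helper):

    def bool_from_str(value):
        # Sketch only: accept the two spellings the manifests use.
        if str(value).lower() == "true":
            return True
        if str(value).lower() == "false":
            return False
        raise ValueError("expected 'true' or 'false', got %r" % (value,))

    assert bool_from_str("true") is True
    assert bool_from_str("False") is False
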
@@ -615,6 +615,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
        browsertime_json,
        json_name="browsertime.json",
        extra_options=[],
        accept_zero_vismet=False,
    ):
        # The visual metrics task expects posix paths.
        def _normalized_join(*args):

@@ -628,6 +629,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
            "browsertime_json_path": _normalized_join(reldir, json_name),
            "test_name": test_name,
            "extra_options": extra_options,
            "accept_zero_vismet": accept_zero_vismet,
        }

    def summarize_and_output(self, test_config, tests, test_names):

@@ -663,6 +665,8 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):

        for test in tests:
            test_name = test["name"]
            accept_zero_vismet = test.get("accept_zero_vismet", False)

            bt_res_json = os.path.join(
                self.result_dir_for_test(test), "browsertime.json"
            )

@@ -711,6 +715,7 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
                        cold_path,
                        json_name="cold-browsertime.json",
                        extra_options=list(extra_options),
                        accept_zero_vismet=accept_zero_vismet,
                    )
                )

@@ -722,12 +727,16 @@ class BrowsertimeResultsHandler(PerftestResultsHandler):
                        warm_path,
                        json_name="warm-browsertime.json",
                        extra_options=list(extra_options),
                        accept_zero_vismet=accept_zero_vismet,
                    )
                )
            else:
                video_jobs.append(
                    self._extract_vmetrics(
                        test_name, bt_res_json, extra_options=list(extra_options)
                        test_name,
                        bt_res_json,
                        extra_options=list(extra_options),
                        accept_zero_vismet=accept_zero_vismet,
                    )
                )

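In summarize_and_output() the per-test accept_zero_vismet value is threaded into every visual-metrics job: in chimera mode the cold and warm browsertime JSON files each get their own job, otherwise a single job points at browsertime.json. A compressed sketch of that branching (the helper below stands in for _extract_vmetrics and keeps only the fields relevant here):

    def queue_video_jobs(test_name, chimera, extra_options, accept_zero_vismet):
        def job(json_name):
            # Stand-in for _extract_vmetrics(); real jobs also carry full paths.
            return {
                "test_name": test_name,
                "browsertime_json_path": json_name,
                "extra_options": list(extra_options),
                "accept_zero_vismet": accept_zero_vismet,
            }

        if chimera:
            return [job("cold-browsertime.json"), job("warm-browsertime.json")]
        return [job("browsertime.json")]

    jobs = queue_video_jobs("process-switch", True, ["fission"], True)
    assert [j["browsertime_json_path"] for j in jobs] == [
        "cold-browsertime.json",
        "warm-browsertime.json",
    ]
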
@@ -24,6 +24,7 @@ use_live_sites = false
[process-switch]
test_url = https://mozilla.seanfeng.dev/files/red.html,https://mozilla.pettay.fi/moztests/blue.html
test_script = process_switch.js
accept_zero_vismet = true
browsertime_args = --pageCompleteWaitTime=1000 --pageCompleteCheckInactivity=true
playback_recordings = mitm5-linux-firefox-seanfeng.mp mitm5-linux-firefox-pettay.mp
playback_pageset_manifest = mitm5-linux-firefox-proc-switch.manifest

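This is the manifest field the commit message refers to: the raptor .ini manifest for the custom process-switch test now sets accept_zero_vismet = true. The value is read as a plain string and converted by the get_raptor_test_list() change above; the snippet below shows that round trip using only the standard library (raptor's real manifest loading goes through manifestparser, so this is purely an illustration):

    import configparser
    import textwrap

    cp = configparser.ConfigParser()
    cp.read_string(textwrap.dedent("""
        [process-switch]
        test_script = process_switch.js
        accept_zero_vismet = true
    """))

    raw = cp["process-switch"]["accept_zero_vismet"]  # the string "true"
    accept_zero_vismet = raw.lower() == "true"        # True, as bool_from_str would return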