Bug 1689537 - Add Python support class for MotionMark 1.3. r=perftest-reviewers,sparky
This patch uses our Raptor support class for custom tests. The data handling shares many similarities with existing benchmark tests, e.g. Speedometer 3, and that is leveraged here. Differential Revision: https://phabricator.services.mozilla.com/D202533
parent 3735e80a72
commit 92b5dc2296

testing/raptor/browsertime/support-scripts/motionmark-1-3.py (new file, 91 lines)
@@ -0,0 +1,91 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import filters
+from base_python_support import BasePythonSupport
+
+
+class MotionMarkSupport(BasePythonSupport):
+    def handle_result(self, bt_result, raw_result, **kwargs):
+        """Parse a result for the required results.
+
+        See base_python_support.py for what's expected from this method.
+        """
+        suite_name = raw_result["extras"][0]["mm_res"]["suite_name"]
+        score_tracker = {
+            subtest: []
+            for subtest in raw_result["extras"][0]["mm_res"]["results"][
+                suite_name
+            ].keys()
+        }
+
+        motionmark_overall_score = []
+        for res in raw_result["extras"]:
+            motionmark_overall_score.append(round(res["mm_res"]["score"], 3))
+
+            for k, v in res["mm_res"]["results"][suite_name].items():
+                score_tracker[k].append(v["complexity"]["bootstrap"]["median"])
+
+        for k, v in score_tracker.items():
+            bt_result["measurements"][k] = v
+
+        bt_result["measurements"]["score"] = motionmark_overall_score
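
For orientation, this is the shape of data `handle_result` consumes, inferred from the lookups above. The subtest names and numbers below are hypothetical, and only the keys the method actually references are shown:

# Illustrative sketch only, not part of the patch. Field names come from
# the lookups in handle_result; the values are made up.
raw_result = {
    "extras": [
        {
            "mm_res": {
                "suite_name": "MotionMark",
                "score": 712.2764,
                "results": {
                    "MotionMark": {
                        "Multiply": {"complexity": {"bootstrap": {"median": 512.3}}},
                        "Leaves": {"complexity": {"bootstrap": {"median": 987.1}}},
                    }
                },
            }
        },
        # ...one entry per browser iteration
    ]
}

# After handle_result runs, bt_result["measurements"] would contain:
#   {"Multiply": [512.3], "Leaves": [987.1], "score": [712.276]}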
+
+    def _build_subtest(self, measurement_name, replicates, test):
+        unit = test.get("unit", "ms")
+        if test.get("subtest_unit"):
+            unit = test.get("subtest_unit")
+
+        lower_is_better = test.get(
+            "subtest_lower_is_better", test.get("lower_is_better", True)
+        )
+        if "score" in measurement_name:
+            lower_is_better = False
+            unit = "score"
+
+        subtest = {
+            "unit": unit,
+            "alertThreshold": float(test.get("alert_threshold", 2.0)),
+            "lowerIsBetter": lower_is_better,
+            "name": measurement_name,
+            "replicates": replicates,
+            "value": round(filters.mean(replicates), 3),
+        }
+
+        return subtest
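
To make the unit and direction handling concrete, here is what `_build_subtest` would return for the overall score measurement (inputs are hypothetical):

# Illustrative sketch only, not part of the patch.
test = {"alert_threshold": 2.0, "lower_is_better": True}
replicates = [712.276, 698.455, 705.102]

# MotionMarkSupport()._build_subtest("score", replicates, test) returns:
# {
#     "unit": "score",          # forced because "score" is in the name
#     "alertThreshold": 2.0,
#     "lowerIsBetter": False,   # a higher MotionMark score is better
#     "name": "score",
#     "replicates": [712.276, 698.455, 705.102],
#     "value": 705.278,         # round(filters.mean(replicates), 3)
# }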
+
+    def summarize_test(self, test, suite, **kwargs):
+        """Summarize the measurements found in the test as a suite with subtests.
+
+        See base_python_support.py for what's expected from this method.
+        """
+        suite["type"] = "benchmark"
+        if suite["subtests"] == {}:
+            suite["subtests"] = []
+        for measurement_name, replicates in test["measurements"].items():
+            if not replicates:
+                continue
+            suite["subtests"].append(
+                self._build_subtest(measurement_name, replicates, test)
+            )
+        suite["subtests"].sort(key=lambda subtest: subtest["name"])
+
+        score = 0
+        for subtest in suite["subtests"]:
+            if subtest["name"] == "score":
+                score = subtest["value"]
+                break
+        suite["value"] = score
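
A minimal sketch of how `summarize_test` assembles the suite, assuming the harness hands in `subtests` initialized to an empty dict (which the `== {}` check above implies):

# Illustrative sketch only, not part of the patch.
suite = {"subtests": {}}
test = {"measurements": {"score": [705.1, 711.9], "Leaves": []}}

MotionMarkSupport().summarize_test(test, suite)
# "Leaves" is skipped (empty replicates). suite["type"] becomes
# "benchmark", and suite["value"] is taken from the "score" subtest:
# round(filters.mean([705.1, 711.9]), 3) == 708.5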
+
+    def modify_command(self, cmd, test):
+        """Modify the browsertime command to have the appropriate suite name.
+
+        This is necessary to grab the correct CSS selector in the browsertime
+        script, and later for parsing through the final benchmark data in the
+        support python script (this file).
+
+        Current options are `MotionMark` and `HTML suite`.
+        """
+
+        cmd += ["--browsertime.suite_name", test.get("suite_name")]
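
The effect of `modify_command` on the browsertime invocation is a simple append; given a test with `suite_name` set (value hypothetical), the command gains one script option:

# Illustrative sketch only, not part of the patch.
cmd = ["browsertime", "..."]          # the command Raptor has built so far
test = {"suite_name": "MotionMark"}

MotionMarkSupport().modify_command(cmd, test)
# cmd now ends with ["--browsertime.suite_name", "MotionMark"], which the
# browsertime script reads to pick the matching CSS selector, and which
# handle_result later relies on when parsing the benchmark data.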
@@ -6,7 +6,7 @@ from base_python_support import BasePythonSupport


 class SamplePythonSupport(BasePythonSupport):
-    def modify_command(self, cmd):
+    def modify_command(self, cmd, test):
         for i, entry in enumerate(cmd):
             if "{replace-with-constant-value}" in entry:
                 cmd[i] = "25"
@@ -22,11 +22,15 @@ class BasePythonSupport:
         """
         pass

-    def modify_command(self, cmd):
+    def modify_command(self, cmd, test):
         """Used to modify the Browsertime command before running the test.

         The `cmd` arg holds the current browsertime command to run. It can
         be changed directly to change how browsertime runs.
+
+        The `test` arg is the test itself with all of its current settings.
+        It can be modified as needed to add additional information to the
+        test that will run.
         """
         pass
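
With the new signature, any support class can use both arguments. A minimal sketch of a hypothetical override (the class name and extra flag are assumptions, not part of the patch):

# Illustrative sketch only, not part of the patch.
from base_python_support import BasePythonSupport


class MyBenchmarkSupport(BasePythonSupport):
    def modify_command(self, cmd, test):
        # Forward a per-test setting to the browsertime script, and note
        # on the test itself that the command was modified.
        cmd += ["--browsertime.page_count", str(test.get("page_count", 1))]
        test["command_modified"] = True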
@@ -914,7 +914,7 @@ class Browsertime(Perftest):

         if test.get("support_class", None):
             LOG.info("Test support class is modifying the command...")
-            test.get("support_class").modify_command(cmd)
+            test.get("support_class").modify_command(cmd, test)

         output_timeout = BROWSERTIME_PAGELOAD_OUTPUT_TIMEOUT
         if test.get("type", "") == "scenario":