Bug 1774181 - Provide a mozperftest command to generate a side-by-side comparison r=perftest-reviewers,sparky

Differential Revision: https://phabricator.services.mozilla.com/D149230
This commit is contained in:
Alex Ionescu 2022-09-02 15:52:42 +00:00
parent b1e0f5998c
commit 6f2827d689
11 changed files with 461 additions and 13 deletions

View File

@ -448,6 +448,9 @@ class CommandAction(argparse.Action):
subcommand = subcommand.pop()
subhandler = handler.subcommand_handlers[subcommand]
# Initialize the parser if necessary
subhandler.parser
c_parser = subhandler.parser or argparse.ArgumentParser(add_help=False)
c_parser.formatter_class = CommandFormatter

View File

@ -3,7 +3,10 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mozlog
from mozperftest.argparser import PerftestArgumentParser # noqa
from mozperftest.argparser import ( # noqa
PerftestArgumentParser,
PerftestToolsArgumentParser,
)
from mozperftest.metadata import Metadata # noqa
from mozperftest.environment import MachEnvironment # noqa

View File

@ -167,3 +167,190 @@ class PerftestArgumentParser(ArgumentParser):
def parse_known_args(self, args=None, namespace=None):
    """Run the perftest-specific argument pre-processing, then defer to
    argparse's standard known-args parsing.
    """
    # parse_helper mutates/validates raw args before argparse sees them.
    self.parse_helper(args)
    return super().parse_known_args(args, namespace)
class SideBySideOptions:
    """Argument definitions for the side-by-side comparison tool.

    Each entry is a ``[flags, options]`` pair consumed by
    ``ArgumentParser.add_argument(*flags, **options)``.
    """

    args = [
        [
            ["-t", "--test-name"],
            {
                "type": str,
                "required": True,
                "dest": "test_name",
                "help": "The name of the test task to get videos from.",
            },
        ],
        [
            ["--new-test-name"],
            {
                "type": str,
                "default": None,
                "help": "The name of the test task to get videos from in the new revision.",
            },
        ],
        [
            ["--base-revision"],
            {
                "type": str,
                "required": True,
                "help": "The base revision to compare a new revision to.",
            },
        ],
        [
            ["--new-revision"],
            {
                "type": str,
                "required": True,
                # Fixed: this help text was a copy-paste of --base-revision's.
                "help": "The new revision to compare a base revision to.",
            },
        ],
        [
            ["--base-branch"],
            {
                "type": str,
                "default": "autoland",
                "help": "Branch to search for the base revision.",
            },
        ],
        [
            ["--new-branch"],
            {
                "type": str,
                "default": "autoland",
                "help": "Branch to search for the new revision.",
            },
        ],
        [
            ["--base-platform"],
            {
                "type": str,
                "required": True,
                # dest is "platform" (not "base_platform") — downstream code
                # reads kwargs["platform"].
                "dest": "platform",
                "help": "Platform to return results for.",
            },
        ],
        [
            ["--new-platform"],
            {
                "type": str,
                "default": None,
                "help": "Platform to return results for in the new revision.",
            },
        ],
        [
            ["-o", "--overwrite"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, the downloaded task group data will be deleted before "
                + "it gets re-downloaded.",
            },
        ],
        [
            ["--cold"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, we'll only look at cold pageload tests.",
            },
        ],
        [
            ["--warm"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, we'll only look at warm pageload tests.",
            },
        ],
        [
            ["--most-similar"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, we'll search for a video pairing that is the most similar.",
            },
        ],
        [
            ["--search-crons"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, we will search for the tasks within the cron jobs as well. ",
            },
        ],
        [
            ["--skip-download"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, we won't try to download artifacts again and we'll "
                + "try using what already exists in the output folder.",
            },
        ],
        [
            ["--output"],
            {
                "type": str,
                "default": None,
                "help": "This is where the data will be saved. Defaults to CWD. "
                + "You can include a name for the file here, otherwise it will "
                + "default to side-by-side.mp4.",
            },
        ],
        [
            ["--metric"],
            {
                "type": str,
                "default": "speedindex",
                "help": "Metric to use for side-by-side comparison.",
            },
        ],
        [
            # NOTE(review): camelCase flag and a `False` default on a str-typed
            # option look intentional (matches the downstream tool's kwarg) —
            # confirm before normalizing.
            ["--vismetPath"],
            {
                "type": str,
                "default": False,
                "help": "Paths to visualmetrics.py for step chart generation.",
            },
        ],
        [
            ["--original"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, use the original videos in the side-by-side instead "
                + "of the postprocessed videos.",
            },
        ],
        [
            ["--skip-slow-gif"],
            {
                "action": "store_true",
                "default": False,
                "help": "If set, the slow-motion GIFs won't be produced.",
            },
        ],
    ]
class ToolingOptions:
    """Registry mapping each tool name accepted by
    PerftestToolsArgumentParser to its argument definitions.
    """

    args = {
        "side-by-side": SideBySideOptions.args,
    }
class PerftestToolsArgumentParser(ArgumentParser):
    """%(prog)s [options] [test paths]"""

    # Which tool's options to install; must be set on the class before the
    # parser is instantiated (e.g. by get_perftest_tools_parser).
    tool = None

    def __init__(self, *args, **kwargs):
        super().__init__(usage=self.__doc__, conflict_handler="resolve", **kwargs)
        selected_tool = PerftestToolsArgumentParser.tool
        if selected_tool is None:
            raise SystemExit("No tool specified, cannot continue parsing")
        for flags, options in ToolingOptions.args[selected_tool]:
            self.add_argument(*flags, **options)

View File

@ -6,7 +6,7 @@ import sys
from functools import partial
import json
from mach.decorators import Command, CommandArgument
from mach.decorators import Command, CommandArgument, SubCommand
from mozbuild.base import MachCommandConditions as conditions
@ -30,6 +30,16 @@ def get_perftest_parser():
return PerftestArgumentParser
def get_perftest_tools_parser(tool):
    """Return a zero-argument parser factory for the given tool name.

    The factory configures PerftestToolsArgumentParser for *tool* and hands
    the class back; the import is deferred so mach can build command metadata
    without loading mozperftest.
    """

    def _tool_parser():
        from mozperftest import PerftestToolsArgumentParser

        PerftestToolsArgumentParser.tool = tool
        return PerftestToolsArgumentParser

    return _tool_parser
def get_parser():
return run_perftest._mach_command._parser
@ -249,3 +259,32 @@ def _run_tests(command_context, **kwargs):
venv, "coverage", ["report"], display=True
):
raise ValueError("Coverage is too low!")
@Command(
    "perftest-tools",
    category="testing",
    description="Run perftest tools",
)
def run_tools(command_context, **kwargs):
    """
    Runs various perftest tools such as the side-by-side generator.
    """
    # The bare `perftest-tools` command does no work itself; the actual tools
    # live in subcommands (e.g. `perftest-tools side-by-side`), so just print
    # a hint for the user.
    print("Runs various perftest tools such as the side-by-side generator.")
@SubCommand(
    "perftest-tools",
    "side-by-side",
    description="This tool can be used to generate a side-by-side visualization of two videos. "
    "When using this tool, make sure that the `--test-name` is an exact match, i.e. if you are "
    "comparing the task `test-linux64-shippable-qr/opt-browsertime-tp6-firefox-linkedin-e10s` "
    "between two revisions, then use `browsertime-tp6-firefox-linkedin-e10s` as the suite name "
    "and `test-linux64-shippable-qr/opt` as the platform.",
    virtualenv_name="perftest-side-by-side",
    parser=get_perftest_tools_parser("side-by-side"),
)
def run_side_by_side(command_context, **kwargs):
    """Generate a side-by-side video comparison between two revisions."""
    # Deferred import: mozperftest.runner pulls in heavy dependencies. Note
    # this `run_tools` is the runner implementation, not the `perftest-tools`
    # mach command defined above.
    from mozperftest.runner import run_tools

    run_tools(command_context, kwargs, {})

View File

@ -28,6 +28,7 @@ import os
import shutil
import sys
import logging
import tempfile
from pathlib import Path
@ -83,6 +84,18 @@ def _activate_mach_virtualenv():
sys.path.append("xpcshell")
def _create_artifacts_dir(kwargs):
    """Create (if needed) and return the artifacts directory for a tool run.

    The layout is ``SRC_ROOT/artifacts/side-by-side/<test_name>``.

    :param kwargs: parsed tool arguments; only ``kwargs["test_name"]`` is read.
    :return: pathlib.Path of the per-test artifacts directory.
    """
    artifacts = SRC_ROOT / "artifacts" / "side-by-side" / kwargs["test_name"]
    # parents=True: the original code mkdir'd "side-by-side" without first
    # creating "artifacts", which raises FileNotFoundError on a fresh tree.
    artifacts.mkdir(parents=True, exist_ok=True)
    return artifacts
def run_tests(mach_cmd, kwargs, client_args):
"""This tests runner can be used directly via main or via Mach.
@ -164,13 +177,55 @@ def run_tests(mach_cmd, kwargs, client_args):
hooks.cleanup()
def run_tools(mach_cmd, kwargs, client_args):
    """This tools runner can be used directly via main or via Mach.

    **TODO**: Before adding any more tools, we need to split this logic out
    into a separate file that runs the tools and sets them up dynamically
    in a similar way to how we use layers.
    """
    from mozperftest.utils import install_package

    # The tool's Python dependencies are installed into the active virtualenv.
    mach_cmd.activate_virtualenv()
    install_package(mach_cmd.virtualenv_manager, "opencv-python==4.5.4.60")
    install_package(
        mach_cmd.virtualenv_manager,
        "mozperftest-tools==0.1.11",
    )

    # Route tool output through mach's logging at INFO level.
    log_level = logging.INFO
    if mach_cmd.log_manager.terminal_handler is not None:
        mach_cmd.log_manager.terminal_handler.level = log_level
    else:
        mach_cmd.log_manager.add_terminal_logging(level=log_level)
    mach_cmd.log_manager.enable_all_structured_loggers()
    mach_cmd.log_manager.enable_unstructured()

    # Imported after install_package so the freshly-installed version is used.
    from mozperftest_tools.side_by_side import SideBySide

    artifacts = _create_artifacts_dir(kwargs)
    tempdir = tempfile.mkdtemp()
    try:
        # Fix: the tool run is now inside the try so the temporary working
        # directory is cleaned up even when SideBySide.run raises.
        s = SideBySide(str(tempdir))
        s.run(**kwargs)
        # Only the generated comparison videos are kept as artifacts.
        for file in os.listdir(tempdir):
            if file.startswith(("cold-", "warm-")):
                print(f"Copying from {tempdir}/{file} to {artifacts}/{file}")
                shutil.copy(Path(tempdir, file), artifacts)
    finally:
        shutil.rmtree(tempdir)
def main(argv=sys.argv[1:]):
"""Used when the runner is directly called from the shell"""
_activate_mach_virtualenv()
from mozbuild.mozconfig import MozconfigLoader
from mozbuild.base import MachCommandBase, MozbuildObject
from mozperftest import PerftestArgumentParser
from mozperftest import PerftestArgumentParser, PerftestToolsArgumentParser
from mach.logging import LoggingManager
from mach.util import get_state_dir
@ -204,10 +259,20 @@ def main(argv=sys.argv[1:]):
MozbuildObject.from_environment = _here
mach_cmd = MachCommandBase(config)
parser = PerftestArgumentParser(description="vanilla perftest")
args = dict(vars(parser.parse_args(args=argv)))
user_args = parser.get_user_args(args)
run_tests(mach_cmd, args, user_args)
if "tools" in argv[0]:
if len(argv) == 1:
raise SystemExit("No tool specified, cannot continue parsing")
PerftestToolsArgumentParser.tool = argv[1]
perftools_parser = PerftestToolsArgumentParser()
args = dict(vars(perftools_parser.parse_args(args=argv[2:])))
user_args = perftools_parser.get_user_args(args)
run_tools(mach_cmd, args, user_args)
else:
perftest_parser = PerftestArgumentParser(description="vanilla perftest")
args = dict(vars(perftest_parser.parse_args(args=argv)))
user_args = perftest_parser.get_user_args(args)
run_tests(mach_cmd, args, user_args)
if __name__ == "__main__":

View File

@ -1,6 +1,9 @@
import json
import os
import pathlib
import sys
from unittest import mock
import pytest
from mozperftest.metrics.notebook.perftestetl import PerftestETL
@ -9,6 +12,19 @@ from mozperftest.metrics.notebook.perftestnotebook import PerftestNotebook
from mozperftest.utils import temp_dir
@pytest.fixture
def patched_mozperftest_tools():
    """Swap in a mocked ``mozperftest_tools.side_by_side`` module and yield
    the mock that any ``SideBySide(...)`` construction will return, so tests
    can assert on calls like ``tools_mock.run``.
    """
    tools_mock = mock.MagicMock(name="tools-mock")
    _module = mock.MagicMock(name="mozperftest_tools")
    _module.SideBySide.return_value = tools_mock
    try:
        # NOTE(review): only the submodule key is replaced; this presumably
        # relies on the parent `mozperftest_tools` package being importable
        # (it is a declared dependency) — confirm in a minimal environment.
        sys.modules["mozperftest_tools.side_by_side"] = _module
        yield tools_mock
    finally:
        # Always restore the real import path, even if the test fails.
        del sys.modules["mozperftest_tools.side_by_side"]
@pytest.fixture(scope="session", autouse=True)
def data():
data_1 = {

View File

@ -6,7 +6,11 @@ import mozunit
import pytest
from datetime import date
from mozperftest.argparser import PerftestArgumentParser, Options
from mozperftest.argparser import (
PerftestToolsArgumentParser,
PerftestArgumentParser,
Options,
)
def test_argparser():
@ -113,5 +117,43 @@ def test_perfherder_metrics():
assert res.perfherder_metrics[0]["unit"] == "euros"
def test_tools_argparser_bad_tool():
    # Instantiating the parser without selecting a tool must abort.
    # NOTE(review): this assumes PerftestToolsArgumentParser.tool is still
    # None — other tests set this class attribute, so correctness depends on
    # test ordering/isolation; confirm.
    with pytest.raises(SystemExit):
        PerftestToolsArgumentParser()
def test_tools_bad_argparser():
    """Parsing must fail when required side-by-side arguments are missing."""
    PerftestToolsArgumentParser.tool = "side-by-side"
    parser = PerftestToolsArgumentParser()
    # --base-revision / --new-revision are required but absent here.
    incomplete_args = [
        "-t",
        "browsertime-first-install-firefox-welcome",
        "--base-platform",
        "test-linux1804-64-shippable-qr",
    ]
    with pytest.raises(SystemExit):
        parser.parse_args(incomplete_args)
def test_tools_argparser():
    """A fully-specified side-by-side invocation parses into the expected namespace."""
    PerftestToolsArgumentParser.tool = "side-by-side"
    parser = PerftestToolsArgumentParser()
    parsed = parser.parse_args(
        [
            "-t",
            "browsertime-first-install-firefox-welcome",
            "--base-platform",
            "test-linux1804-64-shippable-qr",
            "--base-revision",
            "438092d03ac4b9c36b52ba455da446afc7e14213",
            "--new-revision",
            "29943068938aa9e94955dbe13c2e4c254553e4ce",
        ]
    )
    # Note the custom dests: -t -> test_name, --base-platform -> platform.
    expected = {
        "test_name": "browsertime-first-install-firefox-welcome",
        "platform": "test-linux1804-64-shippable-qr",
        "base_revision": "438092d03ac4b9c36b52ba455da446afc7e14213",
        "new_revision": "29943068938aa9e94955dbe13c2e4c254553e4ce",
    }
    for attr, value in expected.items():
        assert getattr(parsed, attr) == value
if __name__ == "__main__":
mozunit.main()

View File

@ -2,6 +2,7 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mozunit
import os
import sys
@ -48,7 +49,9 @@ class _TestMachEnvironment(MachEnvironment):
@contextmanager
def _get_command(command=mozperftest.mach_commands.run_perftest):
from mozbuild.base import MozbuildObject
from mozperftest.argparser import PerftestArgumentParser
from mozperftest.argparser import (
PerftestArgumentParser,
)
config = MozbuildObject.from_environment()
@ -70,9 +73,9 @@ def _get_command(command=mozperftest.mach_commands.run_perftest):
try:
command_context = MachCommandBase(context())
parser = PerftestArgumentParser()
if command == mozperftest.mach_commands.run_perftest:
parser = PerftestArgumentParser()
command = _run_perftest(command)
with mock.patch("mozperftest.mach_commands.get_parser", new=lambda: parser):
@ -81,6 +84,42 @@ def _get_command(command=mozperftest.mach_commands.run_perftest):
shutil.rmtree(context.state_dir)
@contextmanager
def _get_tools_command(tool="side-by-side"):
    """Yield a (command, command_context) pair for driving a perftest-tools
    subcommand in tests, with the tools parser patched to a fixed instance.
    Cleans up the temporary state dir on exit.
    """
    from mozbuild.base import MozbuildObject

    config = MozbuildObject.from_environment()

    class context:
        # Minimal stand-in for a mach command context.
        topdir = config.topobjdir
        cwd = os.getcwd()
        settings = {}
        log_manager = mock.Mock()
        state_dir = tempfile.mkdtemp()

    # used to make arguments passed by the test as
    # being set by the user.
    def _run_tool(func):
        def _run(command_context, **kwargs):
            parser.set_by_user = list(kwargs.keys())
            return func(command_context, **kwargs)

        return _run

    try:
        command_context = MachCommandBase(context())
        command = _run_tool(mozperftest.mach_commands.run_side_by_side)
        parser = mozperftest.mach_commands.get_perftest_tools_parser(tool)
        # NOTE(review): the replacement lambda takes no arguments while the
        # real get_perftest_tools_parser takes `tool` — confirm nothing calls
        # the patched name with an argument during these tests.
        with mock.patch(
            "mozperftest.mach_commands.get_perftest_tools_parser", new=lambda: parser
        ):
            yield command, command_context
    finally:
        shutil.rmtree(context.state_dir)
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
def test_command(mocked_func):
@ -253,5 +292,18 @@ def test_fzf_nothing_selected(*mocked):
cmd(command_context, flavor="desktop-browser")
@mock.patch("mozperftest.MachEnvironment", new=_TestMachEnvironment)
@mock.patch("mozbuild.base.MachCommandBase.activate_virtualenv")
@mock.patch("mozperftest.utils.run_python_script")
@mock.patch("mozperftest.utils.install_package")
def test_side_by_side(mock1, mock2, mock3, patched_mozperftest_tools):
    # Virtualenv activation, script running, and package installation are all
    # patched out; the side-by-side tool itself is replaced by the fixture's
    # mock, so this only exercises the mach command wiring.
    with mock.patch(
        "mozperftest.runner._create_artifacts_dir", return_value="fake_path"
    ) as _:
        with _get_tools_command() as (cmd, command_context), silence(command_context):
            cmd(command_context)
        # The command must have driven the (mocked) SideBySide tool.
        patched_mozperftest_tools.run.assert_called()
if __name__ == "__main__":
mozunit.main()

View File

@ -2,9 +2,13 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from unittest import mock
import mozunit
import pytest
from mozbuild.base import MachCommandBase # noqa
from mozperftest.runner import main
from mozperftest.utils import silence
@ -14,5 +18,30 @@ def test_main():
main(["--help"])
def test_tools():
    """Invoking the runner with `tools` but no tool name must exit."""
    patched_venv = mock.patch(
        "mozperftest.runner._activate_mach_virtualenv", return_value="fake_path"
    )
    with patched_venv, pytest.raises(SystemExit), silence():
        main(["tools"])
@mock.patch("mozperftest.PerftestToolsArgumentParser")
def test_side_by_side(arg, patched_mozperftest_tools):
    # Smoke test: `main(["tools", "side-by-side", ...])` must dispatch to the
    # tools path without error. The parser class, virtualenv activation, and
    # artifacts dir creation are all mocked; the SideBySide tool itself comes
    # from the patched_mozperftest_tools fixture.
    with mock.patch(
        "mozperftest.runner._activate_mach_virtualenv", return_value="fake_path"
    ) as _, mock.patch(
        "mozperftest.runner._create_artifacts_dir", return_value="fake_path"
    ) as _:
        main(
            [
                "tools",
                "side-by-side",
                "-t",
                "fake-test-name",
            ]
        )
if __name__ == "__main__":
mozunit.main()

View File

@ -138,13 +138,24 @@ def install_package(virtualenv_manager, package, ignore_failure=False):
"""
from pip._internal.req.constructors import install_req_from_line
# Ensure that we are looking in the right places for packages. This
# is required in CI because pip installs in an area that is not in
# the search path.
venv_site_lib = str(Path(virtualenv_manager.bin_path, "..", "lib").resolve())
venv_site_packages = str(
Path(
venv_site_lib,
f"python{sys.version_info.major}.{sys.version_info.minor}",
"site-packages",
)
)
if venv_site_packages not in sys.path and ON_TRY:
sys.path.insert(0, venv_site_packages)
req = install_req_from_line(package)
req.check_if_exists(use_user_site=False)
# already installed, check if it's in our venv
if req.satisfied_by is not None:
venv_site_lib = os.path.abspath(
os.path.join(virtualenv_manager.bin_path, "..", "lib")
)
site_packages = os.path.abspath(req.satisfied_by.location)
if site_packages.startswith(venv_site_lib):
# already installed in this venv, we can skip

View File

@ -18,6 +18,7 @@ deps = [
"mozproxy",
"mozinfo",
"mozfile",
"mozperftest-tools",
]
setup(